Add framework of auth-controller
Change-Id: Ifa8cc0e4abb798c63c9d4ac9297e3e32443125e4
Implements: blueprint auth-controller-framework
Signed-off-by: mozhuli <21621232@zju.edu.cn>

parent f3545f50a4
commit b95dd6a380
600  Godeps/Godeps.json  (generated)
@@ -1,5 +1,5 @@
 {
-	"ImportPath": "github.com/openstack/stackube",
+	"ImportPath": "git.openstack.org/openstack/stackube",
 	"GoVersion": "go1.8",
 	"GodepVersion": "v79",
 	"Packages": [
[remaining hunks of the generated dependency manifest: pins for the vendored packages used by the new auth controller, notably github.com/gophercloud/gophercloud and its openstack identity and pagination subpackages (rev 63ea72a718cc3cf0853e41f66003d00456d194a6), golang.org/x/oauth2 and golang.org/x/sync/errgroup, gopkg.in/gcfg.v1 v1.2.0, gopkg.in/warnings.v0 v0.1.1, additional k8s.io/apimachinery packages (rev 2de00c78cb6d6127fb51b9531c1b3def1cbcac8c), and the k8s.io/client-go packages at v2.0.0-alpha.0-311-g6b1566c]
2  Makefile
@@ -57,7 +57,7 @@ test-flags:
 # to detect if any files are listed as having format problems.
 .PHONY: fmt
 fmt: work
-	files=$$(cd $(DEST) && gofmt -l . | tee >(cat - >&2)); [ -z "$$files" ]
+	files=$$(cd $(DEST) && find . -not \( \( -wholename '*/vendor/*' \) -prune \) -name '*.go' | xargs gofmt -s -l | tee >(cat - >&2)); [ -z "$$files" ]

 .PHONY: fmtfix
 fmtfix: work
93  cmd/auth-controller/auth-controller.go  (new file)
@@ -0,0 +1,93 @@
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"git.openstack.org/openstack/stackube/pkg/auth-controller/rbacmanager"
	"git.openstack.org/openstack/stackube/pkg/auth-controller/tenant"
	"git.openstack.org/openstack/stackube/pkg/openstack"
	"git.openstack.org/openstack/stackube/pkg/util"

	"github.com/golang/glog"
	"golang.org/x/sync/errgroup"
	"k8s.io/client-go/kubernetes"
)

var (
	cfg tenant.Config
)

func init() {
	flag.StringVar(&cfg.KubeConfig, "kubeconfig", "", "path to kubeconfig")
	flag.StringVar(&cfg.CloudConfig, "cloudconfig", "", "path to cloudconfig")
	flag.Parse()
}

func Main() int {
	// Verify client settings at the beginning and fail early if there are errors.
	err := verifyClientSetting()
	if err != nil {
		glog.Error(err)
		return 1
	}
	// Create a new tenant controller.
	tc, err := tenant.New(cfg)
	if err != nil {
		glog.Error(err)
		return 1
	}
	// Create a new RBAC controller.
	rm, err := rbacmanager.New(cfg)
	if err != nil {
		glog.Error(err)
		return 1
	}

	ctx, cancel := context.WithCancel(context.Background())
	wg, ctx := errgroup.WithContext(ctx)

	wg.Go(func() error { return tc.Run(ctx.Done()) })
	wg.Go(func() error { return rm.Run(ctx.Done()) })

	// Buffered channel so the signal is not lost if it arrives before we receive.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)

	select {
	case <-term:
		glog.V(4).Info("Received SIGTERM, exiting gracefully...")
	case <-ctx.Done():
	}

	cancel()
	if err := wg.Wait(); err != nil {
		glog.Errorf("Unhandled error received: %v", err)
		return 1
	}

	return 0
}

func verifyClientSetting() error {
	config, err := util.NewClusterConfig(cfg.KubeConfig)
	if err != nil {
		return fmt.Errorf("init cluster config failed: %v", err)
	}
	_, err = kubernetes.NewForConfig(config)
	if err != nil {
		return fmt.Errorf("init kubernetes clientset failed: %v", err)
	}
	_, err = openstack.NewClient(cfg.CloudConfig)
	if err != nil {
		return fmt.Errorf("init openstack client failed: %v", err)
	}
	return nil
}

func main() {
	os.Exit(Main())
}
59  pkg/auth-controller/client/auth/client.go  (new file)
@@ -0,0 +1,59 @@
package auth

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/rest"
)

const (
	TPRGroup   = "stackube.kubernetes.io"
	TPRVersion = "v1"
)

type AuthInterface interface {
	RESTClient() rest.Interface
	TenantsGetter
	// TODO: add network getter
}

type AuthClient struct {
	restClient    rest.Interface
	dynamicClient *dynamic.Client
}

func (c *AuthClient) Tenants(namespace string) TenantInterface {
	return newTenants(c.restClient, c.dynamicClient, namespace)
}

func (c *AuthClient) RESTClient() rest.Interface {
	return c.restClient
}

func NewForConfig(c *rest.Config) (*AuthClient, error) {
	config := *c
	setConfigDefaults(&config)
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}

	dynamicClient, err := dynamic.NewClient(&config)
	if err != nil {
		return nil, err
	}

	return &AuthClient{client, dynamicClient}, nil
}

func setConfigDefaults(config *rest.Config) {
	config.GroupVersion = &schema.GroupVersion{
		Group:   TPRGroup,
		Version: TPRVersion,
	}
	config.APIPath = "/apis"
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs}
	return
}
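For reference, a minimal usage sketch of the new TPR client (not part of this change). It assumes the v1.Tenant/TenantList types from pkg/apis/v1, which this diff references but does not show, and a TenantList that follows the usual Kubernetes list convention with an Items field; the kubeconfig path is illustrative.

package main

import (
	"fmt"

	"git.openstack.org/openstack/stackube/pkg/apis/v1"
	"git.openstack.org/openstack/stackube/pkg/auth-controller/client/auth"
	"git.openstack.org/openstack/stackube/pkg/util"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Build a rest.Config from a kubeconfig path (same helper the controllers below use).
	cfg, err := util.NewClusterConfig("/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig points the REST client at /apis/stackube.kubernetes.io/v1 (see setConfigDefaults above).
	client, err := auth.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// List Tenant third-party resources across all namespaces.
	obj, err := client.Tenants("").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	// Assumed: v1.TenantList exposes Items, and Tenant embeds ObjectMeta (Name).
	if list, ok := obj.(*v1.TenantList); ok {
		for _, t := range list.Items {
			fmt.Println(t.Name)
		}
	}
}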
175  pkg/auth-controller/client/auth/tenant.go  (new file)
@@ -0,0 +1,175 @@
package auth

import (
	"encoding/json"

	"git.openstack.org/openstack/stackube/pkg/apis/v1"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

const (
	TPRTenantsKind = "Tenant"
	TPRTenantName  = "tenants"
)

type TenantsGetter interface {
	Tenants(namespace string) TenantInterface
}

type TenantInterface interface {
	Create(*v1.Tenant) (*v1.Tenant, error)
	Get(name string) (*v1.Tenant, error)
	Update(*v1.Tenant) (*v1.Tenant, error)
	Delete(name string, options *metav1.DeleteOptions) error
	List(opts metav1.ListOptions) (runtime.Object, error)
	Watch(opts metav1.ListOptions) (watch.Interface, error)
}

type tenants struct {
	restClient rest.Interface
	client     *dynamic.ResourceClient
	ns         string
}

func newTenants(r rest.Interface, c *dynamic.Client, namespace string) *tenants {
	return &tenants{
		r,
		c.Resource(
			&metav1.APIResource{
				Kind:       TPRTenantsKind,
				Name:       TPRTenantName,
				Namespaced: true,
			},
			namespace,
		),
		namespace,
	}
}

func (p *tenants) Create(o *v1.Tenant) (*v1.Tenant, error) {
	up, err := UnstructuredFromTenant(o)
	if err != nil {
		return nil, err
	}

	up, err = p.client.Create(up)
	if err != nil {
		return nil, err
	}

	return TenantFromUnstructured(up)
}

func (p *tenants) Get(name string) (*v1.Tenant, error) {
	obj, err := p.client.Get(name)
	if err != nil {
		return nil, err
	}
	return TenantFromUnstructured(obj)
}

func (p *tenants) Update(o *v1.Tenant) (*v1.Tenant, error) {
	up, err := UnstructuredFromTenant(o)
	if err != nil {
		return nil, err
	}

	up, err = p.client.Update(up)
	if err != nil {
		return nil, err
	}

	return TenantFromUnstructured(up)
}

func (p *tenants) Delete(name string, options *metav1.DeleteOptions) error {
	return p.client.Delete(name, options)
}

func (p *tenants) List(opts metav1.ListOptions) (runtime.Object, error) {
	req := p.restClient.Get().
		Namespace(p.ns).
		Resource("tenants").
		// VersionedParams(&options, v1.ParameterCodec)
		FieldsSelectorParam(nil)

	b, err := req.DoRaw()
	if err != nil {
		return nil, err
	}
	var tena v1.TenantList
	return &tena, json.Unmarshal(b, &tena)
}

func (p *tenants) Watch(opts metav1.ListOptions) (watch.Interface, error) {
	r, err := p.restClient.Get().
		Prefix("watch").
		Namespace(p.ns).
		Resource("tenants").
		// VersionedParams(&options, v1.ParameterCodec).
		FieldsSelectorParam(nil).
		Stream()
	if err != nil {
		return nil, err
	}
	return watch.NewStreamWatcher(&tenantDecoder{
		dec:   json.NewDecoder(r),
		close: r.Close,
	}), nil
}

// TenantFromUnstructured unmarshals a Tenant object from the dynamic client's unstructured form.
func TenantFromUnstructured(r *unstructured.Unstructured) (*v1.Tenant, error) {
	b, err := json.Marshal(r.Object)
	if err != nil {
		return nil, err
	}
	var p v1.Tenant
	if err := json.Unmarshal(b, &p); err != nil {
		return nil, err
	}
	p.TypeMeta.Kind = TPRTenantsKind
	p.TypeMeta.APIVersion = TPRGroup + "/" + TPRVersion
	return &p, nil
}

// UnstructuredFromTenant marshals a Tenant object into the dynamic client's unstructured form.
func UnstructuredFromTenant(p *v1.Tenant) (*unstructured.Unstructured, error) {
	p.TypeMeta.Kind = TPRTenantsKind
	p.TypeMeta.APIVersion = TPRGroup + "/" + TPRVersion
	b, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}
	var r unstructured.Unstructured
	if err := json.Unmarshal(b, &r.Object); err != nil {
		return nil, err
	}
	return &r, nil
}

type tenantDecoder struct {
	dec   *json.Decoder
	close func() error
}

func (d *tenantDecoder) Close() {
	d.close()
}

func (d *tenantDecoder) Decode() (action watch.EventType, object runtime.Object, err error) {
	var e struct {
		Type   watch.EventType
		Object v1.Tenant
	}
	if err := d.dec.Decode(&e); err != nil {
		return watch.Error, nil, err
	}
	return e.Type, &e.Object, nil
}
224  pkg/auth-controller/rbacmanager/controller.go  (new file)
@@ -0,0 +1,224 @@
package rbacmanager

import (
	"fmt"
	"time"

	"git.openstack.org/openstack/stackube/pkg/auth-controller/rbacmanager/rbac"
	"git.openstack.org/openstack/stackube/pkg/auth-controller/tenant"
	"git.openstack.org/openstack/stackube/pkg/util"

	"github.com/golang/glog"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

const (
	resyncPeriod = 5 * time.Minute
)

type Controller struct {
	kclient *kubernetes.Clientset
	nsInf   cache.SharedIndexInformer
	queue   workqueue.RateLimitingInterface
}

// New creates a new RBAC controller.
func New(conf tenant.Config) (*Controller, error) {
	cfg, err := util.NewClusterConfig(conf.KubeConfig)
	if err != nil {
		return nil, fmt.Errorf("init cluster config failed: %v", err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("init kubernetes client failed: %v", err)
	}

	o := &Controller{
		kclient: client,
		queue:   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "rbacmanager"),
	}

	o.nsInf = cache.NewSharedIndexInformer(
		cache.NewListWatchFromClient(o.kclient.Core().RESTClient(), "namespaces", api.NamespaceAll, nil),
		&v1.Namespace{}, resyncPeriod, cache.Indexers{},
	)

	o.nsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    o.handleNamespaceAdd,
		DeleteFunc: o.handleNamespaceDelete,
		UpdateFunc: o.handleNamespaceUpdate,
	})

	return o, nil
}

// Run the controller.
func (c *Controller) Run(stopc <-chan struct{}) error {
	defer c.queue.ShutDown()

	errChan := make(chan error)
	go func() {
		v, err := c.kclient.Discovery().ServerVersion()
		if err != nil {
			errChan <- fmt.Errorf("communicating with server failed: %v", err)
			return
		}
		glog.V(4).Infof("Connection established, cluster-version: %s", v)
		errChan <- nil
	}()

	select {
	case err := <-errChan:
		if err != nil {
			return err
		}
		glog.V(4).Info("TPR API endpoints ready")
	case <-stopc:
		return nil
	}

	go c.worker()

	go c.nsInf.Run(stopc)

	<-stopc
	return nil
}

func (c *Controller) keyFunc(obj interface{}) (string, bool) {
	k, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		glog.Errorf("Creating key failed: %v", err)
		return k, false
	}
	return k, true
}

// enqueue adds a key to the queue. If obj is a key already it gets added directly.
// Otherwise, the key is extracted via keyFunc.
func (c *Controller) enqueue(obj interface{}) {
	if obj == nil {
		return
	}

	key, ok := obj.(string)
	if !ok {
		key, ok = c.keyFunc(obj)
		if !ok {
			return
		}
	}

	c.queue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *Controller) worker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)

	err := c.sync(key.(string))
	if err == nil {
		c.queue.Forget(key)
		return true
	}

	utilruntime.HandleError(fmt.Errorf("Sync %q failed: %v", key, err))
	c.queue.AddRateLimited(key)

	return true
}

func (c *Controller) handleNamespaceAdd(obj interface{}) {
	key, ok := c.keyFunc(obj)
	if !ok {
		return
	}
	glog.V(4).Infof("Added namespace %s", key)
	c.enqueue(key)
}

func (c *Controller) handleNamespaceDelete(obj interface{}) {
	key, ok := c.keyFunc(obj)
	if !ok {
		return
	}
	glog.V(4).Infof("Deleted namespace %s", key)
	c.enqueue(key)
}

func (c *Controller) handleNamespaceUpdate(old, cur interface{}) {
	oldns := old.(*v1.Namespace)
	curns := cur.(*v1.Namespace)
	if oldns.ResourceVersion == curns.ResourceVersion {
		return
	}
	key, ok := c.keyFunc(cur)
	if !ok {
		return
	}
	glog.V(4).Infof("Updated namespace %s", key)
	c.enqueue(key)
}

func (c *Controller) sync(key string) error {
	obj, exists, err := c.nsInf.GetIndexer().GetByKey(key)
	if err != nil {
		return err
	}
	if !exists {
		return nil
	}

	ns := obj.(*v1.Namespace)
	glog.V(4).Infof("Sync RBAC %s", key)
	err = c.syncRbac(ns)
	if err != nil {
		return err
	}

	return nil
}

func (c *Controller) syncRbac(ns *v1.Namespace) error {
	if ns.DeletionTimestamp != nil {
		return nil
	}
	tenant, ok := ns.Labels["tenant"]
	if !ok {
		return nil
	}
	rbacClient := c.kclient.Rbac()
	// Create the role for the tenant.
	role := rbac.GenerateRoleByNamespace(ns.Name)
	_, err := rbacClient.Roles(ns.Name).Create(role)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Failed to create default-role in namespace %s for tenant %s: %v", ns.Name, tenant, err)
		return err
	}
	glog.V(4).Infof("Created default-role in namespace %s for tenant %s", ns.Name, tenant)
	// Create the rolebinding for the tenant.
	roleBinding := rbac.GenerateRoleBinding(ns.Name, tenant)
	_, err = rbacClient.RoleBindings(ns.Name).Create(roleBinding)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Failed to create %s-rolebinding in namespace %s for tenant %s: %v", tenant, ns.Name, tenant, err)
		return err
	}
	glog.V(4).Infof("Created %s-rolebinding in namespace %s for tenant %s", tenant, ns.Name, tenant)
	return nil
}
96  pkg/auth-controller/rbacmanager/rbac/rbac.go  (new file)
@@ -0,0 +1,96 @@
package rbac

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/apis/rbac/v1beta1"
)

// GenerateRoleByNamespace returns a Role named "default-role" that allows all
// verbs on all resources within the given namespace.
func GenerateRoleByNamespace(namespace string) *v1beta1.Role {
	policyRule := v1beta1.PolicyRule{
		Verbs:     []string{v1beta1.VerbAll},
		APIGroups: []string{v1beta1.APIGroupAll},
		Resources: []string{v1beta1.ResourceAll},
	}
	role := &v1beta1.Role{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Role",
			APIVersion: "rbac.authorization.k8s.io/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default-role",
			Namespace: namespace,
		},
		Rules: []v1beta1.PolicyRule{policyRule},
	}
	return role
}

// GenerateRoleBinding returns a RoleBinding that grants "default-role" in the
// given namespace to the tenant group.
func GenerateRoleBinding(namespace, tenant string) *v1beta1.RoleBinding {
	subject := v1beta1.Subject{
		Kind: "Group",
		Name: tenant,
	}
	roleRef := v1beta1.RoleRef{
		APIGroup: "rbac.authorization.k8s.io",
		Kind:     "Role",
		Name:     "default-role",
	}
	roleBinding := &v1beta1.RoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       "RoleBinding",
			APIVersion: "rbac.authorization.k8s.io/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      tenant + "-rolebinding",
			Namespace: namespace,
		},
		Subjects: []v1beta1.Subject{subject},
		RoleRef:  roleRef,
	}
	return roleBinding
}

// GenerateClusterRole returns the "namespace-creater" ClusterRole, which allows
// all verbs on namespaces.
func GenerateClusterRole() *v1beta1.ClusterRole {
	policyRule := v1beta1.PolicyRule{
		Verbs:     []string{v1beta1.VerbAll},
		APIGroups: []string{v1beta1.APIGroupAll},
		Resources: []string{"namespaces"},
	}

	clusterRole := &v1beta1.ClusterRole{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ClusterRole",
			APIVersion: "rbac.authorization.k8s.io/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "namespace-creater",
		},
		Rules: []v1beta1.PolicyRule{policyRule},
	}
	return clusterRole
}

// GenerateClusterRoleBindingByTenant returns a ClusterRoleBinding that grants
// the "namespace-creater" ClusterRole to the tenant group.
func GenerateClusterRoleBindingByTenant(tenant string) *v1beta1.ClusterRoleBinding {
	subject := v1beta1.Subject{
		Kind: "Group",
		Name: tenant,
	}
	roleRef := v1beta1.RoleRef{
		APIGroup: "rbac.authorization.k8s.io",
		Kind:     "ClusterRole",
		Name:     "namespace-creater",
	}

	clusterRoleBinding := &v1beta1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ClusterRoleBinding",
			APIVersion: "rbac.authorization.k8s.io/v1beta1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: tenant + "-namespace-creater",
		},
		Subjects: []v1beta1.Subject{subject},
		RoleRef:  roleRef,
	}
	return clusterRoleBinding
}
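As an illustration (not part of this change), the snippet below prints what the helpers above generate for a namespace "demo" owned by keystone tenant "demo"; the RBAC controller above creates these same objects for every namespace labelled with a tenant.

package main

import (
	"fmt"

	"git.openstack.org/openstack/stackube/pkg/auth-controller/rbacmanager/rbac"
)

func main() {
	// "default-role" allows all verbs on all resources, but only inside namespace "demo".
	role := rbac.GenerateRoleByNamespace("demo")
	fmt.Println(role.Name, role.Namespace, role.Rules[0].Verbs)

	// "demo-rolebinding" binds the keystone tenant (as a Group subject) to that role.
	binding := rbac.GenerateRoleBinding("demo", "demo")
	fmt.Println(binding.Name, binding.Subjects[0].Name, binding.RoleRef.Name)
}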
344  pkg/auth-controller/tenant/controller.go  (new file)
@@ -0,0 +1,344 @@
package tenant

import (
	"fmt"
	"strings"
	"time"

	"git.openstack.org/openstack/stackube/pkg/apis/v1"
	"git.openstack.org/openstack/stackube/pkg/auth-controller/client/auth"
	"git.openstack.org/openstack/stackube/pkg/auth-controller/rbacmanager/rbac"
	"git.openstack.org/openstack/stackube/pkg/openstack"
	"git.openstack.org/openstack/stackube/pkg/util"

	"github.com/golang/glog"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	apimetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api"
	apiv1 "k8s.io/client-go/pkg/api/v1"
	extensionsobj "k8s.io/client-go/pkg/apis/extensions/v1beta1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

const (
	tprTenant = "tenant." + auth.TPRGroup

	resyncPeriod = 5 * time.Minute
)

// TenantController manages the lifecycle of Tenant objects.
type TenantController struct {
	kclient  *kubernetes.Clientset
	tclient  *auth.AuthClient
	osclient *openstack.Client
	tenInf   cache.SharedIndexInformer
	queue    workqueue.RateLimitingInterface
	config   Config
}

// Config defines configuration parameters for the TenantController.
type Config struct {
	KubeConfig  string
	CloudConfig string
}

// New creates a new tenant controller.
func New(conf Config) (*TenantController, error) {
	cfg, err := util.NewClusterConfig(conf.KubeConfig)
	if err != nil {
		return nil, fmt.Errorf("init cluster config failed: %v", err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("init kubernetes client failed: %v", err)
	}
	tclient, err := auth.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("init restclient for tenant failed: %v", err)
	}

	openStackClient, err := openstack.NewClient(conf.CloudConfig)
	if err != nil {
		return nil, fmt.Errorf("init openstack client failed: %v", err)
	}

	c := &TenantController{
		kclient:  client,
		tclient:  tclient,
		osclient: openStackClient,
		queue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "tenant"),
		config:   conf,
	}

	c.tenInf = cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc:  tclient.Tenants(api.NamespaceAll).List,
			WatchFunc: tclient.Tenants(api.NamespaceAll).Watch,
		},
		&v1.Tenant{}, resyncPeriod, cache.Indexers{},
	)
	c.tenInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    c.handleAddTenant,
		DeleteFunc: c.handleDeleteTenant,
		UpdateFunc: c.handleUpdateTenant,
	})

	return c, nil
}

// Run the controller.
func (c *TenantController) Run(stopc <-chan struct{}) error {
	defer c.queue.ShutDown()

	errChan := make(chan error)
	go func() {
		v, err := c.kclient.Discovery().ServerVersion()
		if err != nil {
			errChan <- fmt.Errorf("communicating with server failed: %v", err)
			return
		}
		glog.V(4).Infof("Connection established, cluster-version: %s", v)
		// Create TPRs.
		if err := c.createTPRs(); err != nil {
			errChan <- fmt.Errorf("creating TPRs failed: %v", err)
			return
		}
		// Create the cluster role.
		if err = c.createClusterRoles(); err != nil {
			errChan <- fmt.Errorf("creating clusterrole failed: %v", err)
			return
		}

		errChan <- nil
	}()

	select {
	case err := <-errChan:
		if err != nil {
			return err
		}
		glog.V(4).Info("TPR API endpoints ready")
	case <-stopc:
		return nil
	}

	go c.worker()

	go c.tenInf.Run(stopc)

	<-stopc
	return nil
}

func (c *TenantController) keyFunc(obj interface{}) (string, bool) {
	k, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		glog.V(4).Infof("Failed to create key: %v", err)
		return k, false
	}
	return k, true
}

func (c *TenantController) handleAddTenant(obj interface{}) {
	key, ok := c.keyFunc(obj)
	if !ok {
		return
	}
	glog.V(4).Infof("Added tenant %s", key)
	c.enqueue(key)
}

func (c *TenantController) handleDeleteTenant(obj interface{}) {
	key, ok := c.keyFunc(obj)
	if !ok {
		return
	}
	glog.V(4).Infof("Deleted tenant %s", key)
	c.enqueue(key)
}

func (c *TenantController) handleUpdateTenant(old, cur interface{}) {
	key, ok := c.keyFunc(cur)
	if !ok {
		return
	}
	glog.V(4).Infof("Updated tenant %s", key)
	c.enqueue(key)
}

// enqueue adds a key to the queue. If obj is a key already it gets added directly.
// Otherwise, the key is extracted via keyFunc.
func (c *TenantController) enqueue(obj interface{}) {
	if obj == nil {
		return
	}
	key, ok := obj.(string)
	if !ok {
		key, ok = c.keyFunc(obj)
		if !ok {
			return
		}
	}
	c.queue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *TenantController) worker() {
	for c.processNextWorkItem() {
	}
}

func (c *TenantController) processNextWorkItem() bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)

	err := c.sync(key.(string))
	if err == nil {
		c.queue.Forget(key)
		return true
	}
	utilruntime.HandleError(fmt.Errorf("Sync %q failed: %v", key, err))
	c.queue.AddRateLimited(key)
	return true
}

func (c *TenantController) sync(key string) error {
	obj, exists, err := c.tenInf.GetIndexer().GetByKey(key)
	if err != nil {
		return err
	}
	if !exists {
		// Delete tenant-related resources in Kubernetes.
		tenant := strings.Split(key, "/")
		deleteOptions := &apimetav1.DeleteOptions{
			TypeMeta: apimetav1.TypeMeta{
				Kind:       "ClusterRoleBinding",
				APIVersion: "rbac.authorization.k8s.io/v1beta1",
			},
		}
		err = c.kclient.Rbac().ClusterRoleBindings().Delete(tenant[1]+"-namespace-creater", deleteOptions)
		if err != nil && !apierrors.IsNotFound(err) {
			glog.Errorf("Failed to delete ClusterRoleBinding for tenant %s: %v", tenant[1], err)
			return err
		}
		glog.V(4).Infof("Deleted ClusterRoleBinding %s", tenant[1])
		// Delete all users on the tenant.
		err = c.osclient.DeleteAllUsersOnTenant(tenant[1])
		if err != nil {
			glog.Errorf("Failed to delete all users in tenant %s: %v", tenant[1], err)
			return err
		}
		// Delete the tenant in keystone.
		err = c.osclient.DeleteTenant(tenant[1])
		if err != nil {
			glog.Errorf("Failed to delete tenant %s: %v", tenant[1], err)
			return err
		}
		return nil
	}

	t := obj.(*v1.Tenant)
	glog.V(4).Infof("Sync tenant %s", key)
	err = c.syncTenant(t)
	if err != nil {
		return err
	}
	return nil
}

func (c *TenantController) createTPRs() error {
	tprs := []*extensionsobj.ThirdPartyResource{
		{
			ObjectMeta: apimetav1.ObjectMeta{
				Name: tprTenant,
			},
			Versions: []extensionsobj.APIVersion{
				{Name: auth.TPRVersion},
			},
			Description: "TPR for tenant",
		},
	}
	tprClient := c.kclient.Extensions().ThirdPartyResources()

	for _, tpr := range tprs {
		if _, err := tprClient.Create(tpr); err != nil && !apierrors.IsAlreadyExists(err) {
			return err
		}
		glog.V(4).Infof("Created TPR %s", tpr.Name)
	}

	// We have to wait for the TPRs to be ready. Otherwise the initial watch may fail.
	err := util.WaitForTPRReady(c.kclient.CoreV1().RESTClient(), auth.TPRGroup, auth.TPRVersion, auth.TPRTenantName)
	if err != nil {
		return err
	}
	return nil
}

func (c *TenantController) syncTenant(tenant *v1.Tenant) error {
	roleBinding := rbac.GenerateClusterRoleBindingByTenant(tenant.Name)
	_, err := c.kclient.Rbac().ClusterRoleBindings().Create(roleBinding)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Failed to create ClusterRoleBinding for tenant %s: %v", tenant.Name, err)
		return err
	}
	glog.V(4).Infof("Created ClusterRoleBinding %s-namespace-creater for tenant %s", tenant.Name, tenant.Name)
	if tenant.Spec.TenantID != "" {
		// Create a user with the spec username and password in the given tenant.
		err = c.osclient.CreateUser(tenant.Spec.UserName, tenant.Spec.Password, tenant.Spec.TenantID)
		if err != nil && !openstack.IsAlreadyExists(err) {
			glog.Errorf("Failed to create user %s: %v", tenant.Spec.UserName, err)
			return err
		}
	} else {
		// Create the tenant if it does not exist in keystone.
		tenantID, err := c.osclient.CreateTenant(tenant.Name)
		if err != nil {
			return err
		}
		// Create a user with the spec username and password in the created tenant.
		err = c.osclient.CreateUser(tenant.Spec.UserName, tenant.Spec.Password, tenantID)
		if err != nil {
			return err
		}
	}

	// Create a namespace whose name is the same as the tenant's name.
	err = c.createNamespace(tenant.Name)
	if err != nil {
		return err
	}
	glog.V(4).Infof("Created namespace %s for tenant %s", tenant.Name, tenant.Name)
	return nil
}

func (c *TenantController) createClusterRoles() error {
	nsCreater := rbac.GenerateClusterRole()
	_, err := c.kclient.Rbac().ClusterRoles().Create(nsCreater)
	if err != nil && !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Failed to create ClusterRole namespace-creater: %v", err)
		return err
	}
	glog.V(4).Info("Created ClusterRole namespace-creater")
	return nil
}

func (c *TenantController) createNamespace(namespace string) error {
	_, err := c.kclient.CoreV1().Namespaces().Create(&apiv1.Namespace{
		ObjectMeta: apimetav1.ObjectMeta{
			Name: namespace,
		},
	})
	if err != nil && !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Failed to create namespace %s: %v", namespace, err)
		return err
	}
	return nil
}
192 pkg/openstack/client.go Normal file
@ -0,0 +1,192 @@
package openstack

import (
	"os"

	"github.com/golang/glog"
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/identity/v2/tenants"
	"github.com/gophercloud/gophercloud/openstack/identity/v2/users"
	"github.com/gophercloud/gophercloud/pagination"
	gcfg "gopkg.in/gcfg.v1"
)

const (
	// StatusCodeAlreadyExists is the HTTP status code Keystone returns when a
	// resource with the same name already exists (409 Conflict).
	StatusCodeAlreadyExists int = 409
)

// Client wraps the gophercloud clients used to talk to Keystone.
type Client struct {
	Identity *gophercloud.ServiceClient
	Provider *gophercloud.ProviderClient
}

// Config mirrors the cloud config file read by gcfg.
type Config struct {
	Global struct {
		AuthUrl    string `gcfg:"auth-url"`
		Username   string `gcfg:"username"`
		Password   string `gcfg:"password"`
		TenantName string `gcfg:"tenant-name"`
	}
}
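
// For reference, an illustrative cloud config file matching the gcfg tags above
// (placeholder values, not defaults shipped with this change):
//
//	[Global]
//	auth-url = http://127.0.0.1:5000/v2.0
//	username = admin
//	password = secret
//	tenant-name = admin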

func toAuthOptions(cfg Config) gophercloud.AuthOptions {
	return gophercloud.AuthOptions{
		IdentityEndpoint: cfg.Global.AuthUrl,
		Username:         cfg.Global.Username,
		Password:         cfg.Global.Password,
		TenantName:       cfg.Global.TenantName,
	}
}

// NewClient builds an authenticated Keystone admin client, preferring the cloud
// config file and falling back to the standard OS_* environment variables.
func NewClient(config string) (*Client, error) {
	var opts gophercloud.AuthOptions
	if cfg, err := readConfig(config); err != nil {
		glog.V(0).Infof("Failed to read cloud config: %v. Falling back to initializing the OpenStack client from environment variables", err)
		opts, err = openstack.AuthOptionsFromEnv()
		if err != nil {
			return nil, err
		}
	} else {
		opts = toAuthOptions(cfg)
	}

	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		return nil, err
	}

	identity, err := openstack.NewIdentityV2(provider, gophercloud.EndpointOpts{
		Availability: gophercloud.AvailabilityAdmin,
	})
	if err != nil {
		return nil, err
	}

	client := &Client{
		Identity: identity,
		Provider: provider,
	}
	return client, nil
}
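
// A minimal usage sketch (the config path is hypothetical):
//
//	client, err := openstack.NewClient("/etc/stackube/cloudconfig")
//	if err != nil {
//		glog.Fatalf("Failed to create OpenStack client: %v", err)
//	}
//	tenantID, err := client.CreateTenant("demo")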

func readConfig(config string) (Config, error) {
	conf, err := os.Open(config)
	if err != nil {
		return Config{}, err
	}
	defer conf.Close()

	var cfg Config
	err = gcfg.ReadInto(&cfg, conf)
	if err != nil {
		return Config{}, err
	}
	return cfg, nil
}

// getTenantID looks up the Keystone tenant ID for the given tenant name.
func (c *Client) getTenantID(tenantName string) (string, error) {
	var tenantID string
	err := tenants.List(c.Identity, nil).EachPage(func(page pagination.Page) (bool, error) {
		tenantList, err1 := tenants.ExtractTenants(page)
		if err1 != nil {
			return false, err1
		}
		for _, t := range tenantList {
			if t.Name == tenantName {
				tenantID = t.ID
				break
			}
		}
		return true, nil
	})
	if err != nil {
		return "", err
	}
	return tenantID, nil
}

// CreateTenant creates a Keystone tenant with the given name (idempotent) and
// returns its ID.
func (c *Client) CreateTenant(tenantName string) (string, error) {
	createOpts := tenants.CreateOpts{
		Name:        tenantName,
		Description: "stackube",
		Enabled:     gophercloud.Enabled,
	}

	_, err := tenants.Create(c.Identity, createOpts).Extract()
	if err != nil && !IsAlreadyExists(err) {
		glog.Errorf("Failed to create tenant %s: %v", tenantName, err)
		return "", err
	}
	glog.V(4).Infof("Tenant %s created", tenantName)
	tenantID, err := c.getTenantID(tenantName)
	if err != nil {
		return "", err
	}
	return tenantID, nil
}

// DeleteTenant deletes the Keystone tenant with the given name, if it exists.
func (c *Client) DeleteTenant(tenantName string) error {
	return tenants.List(c.Identity, nil).EachPage(func(page pagination.Page) (bool, error) {
		tenantList, err1 := tenants.ExtractTenants(page)
		if err1 != nil {
			return false, err1
		}
		for _, t := range tenantList {
			if t.Name == tenantName {
				res := tenants.Delete(c.Identity, t.ID)
				glog.V(4).Infof("Tenant %s deleted: %v", tenantName, res)
				break
			}
		}
		return true, nil
	})
}

// CreateUser creates a Keystone user with the given name and password in the
// given tenant (idempotent).
func (c *Client) CreateUser(username, password, tenantID string) error {
	opts := users.CreateOpts{
		Name:     username,
		TenantID: tenantID,
		Enabled:  gophercloud.Enabled,
		Password: password,
	}
	_, err := users.Create(c.Identity, opts).Extract()
	if err != nil && !IsAlreadyExists(err) {
		glog.Errorf("Failed to create user %s: %v", username, err)
		return err
	}
	glog.V(4).Infof("User %s created", username)
	return nil
}

// DeleteAllUsersOnTenant deletes all users that belong to the named tenant.
func (c *Client) DeleteAllUsersOnTenant(tenantName string) error {
	tenantID, err := c.getTenantID(tenantName)
	if err != nil {
		return err
	}
	// TODO: users returned by users.List have an empty TenantID field.
	return users.List(c.Identity).EachPage(func(page pagination.Page) (bool, error) {
		usersList, err := users.ExtractUsers(page)
		if err != nil {
			return false, err
		}
		for _, u := range usersList {
			if u.TenantID == tenantID {
				res := users.Delete(c.Identity, u.ID)
				glog.V(4).Infof("User %s deleted: %v", u.Name, res)
			}
		}
		return true, nil
	})
}

// IsAlreadyExists determines whether err indicates that the specified resource
// already exists.
func IsAlreadyExists(err error) bool {
	return reasonForError(err) == StatusCodeAlreadyExists
}

// reasonForError extracts the HTTP status code from a gophercloud error, or 0 if
// the error carries no status code.
func reasonForError(err error) int {
	switch t := err.(type) {
	case gophercloud.ErrUnexpectedResponseCode:
		return t.Actual
	}
	return 0
}
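
// For reference, a duplicate create typically surfaces from gophercloud as
// gophercloud.ErrUnexpectedResponseCode{Actual: 409}, which IsAlreadyExists treats
// as success for the idempotent Create* helpers above.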
49 pkg/util/k8sutil.go Normal file
@ -0,0 +1,49 @@
package util

import (
	"fmt"
	"net/http"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// WaitForTPRReady waits for a third party resource to be available for use.
func WaitForTPRReady(restClient rest.Interface, tprGroup, tprVersion, tprName string) error {
	return wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) {
		res := restClient.Get().AbsPath("apis", tprGroup, tprVersion, tprName).Do()
		err := res.Error()
		if err != nil {
			// RESTClient returns *apierrors.StatusError for any status codes < 200 or > 206
			// and http.Client.Do errors are returned directly.
			if se, ok := err.(*apierrors.StatusError); ok {
				if se.Status().Code == http.StatusNotFound {
					return false, nil
				}
			}
			return false, err
		}

		var statusCode int
		res.StatusCode(&statusCode)
		if statusCode != http.StatusOK {
			return false, fmt.Errorf("invalid status code: %d", statusCode)
		}

		return true, nil
	})
}

// NewClusterConfig builds a Kubernetes client configuration from the given
// kubeconfig path (falling back to in-cluster defaults when the path is empty)
// and raises the client-side rate limits for controller use.
func NewClusterConfig(kubeConfig string) (*rest.Config, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
	if err != nil {
		return nil, err
	}
	cfg.QPS = 100
	cfg.Burst = 100
	return cfg, nil
}
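
// A minimal sketch of how a controller might consume this (names are illustrative;
// assumes k8s.io/client-go/kubernetes is imported as kubernetes):
//
//	cfg, err := util.NewClusterConfig(kubeconfig)
//	if err != nil {
//		glog.Fatalf("Failed to build cluster config: %v", err)
//	}
//	kclient := kubernetes.NewForConfigOrDie(cfg)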
15 vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
152 vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
@ -0,0 +1,152 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build !js,!appengine,!safe,!disableunsafe
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. The are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
|
||||
// Commit adf9b30e5594 modified the flags to separate the
|
||||
// flagRO flag into two bits which specifies whether or not the
|
||||
// field is embedded. This causes flagIndir to move over a bit
|
||||
// and means that flagRO is the combination of either of the
|
||||
// original flagRO bit and the new bit.
|
||||
//
|
||||
// This code detects the change by extracting what used to be
|
||||
// the indirect bit to ensure it's set. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are
|
||||
// updated accordingly.
|
||||
if upfv&flagIndir == 0 {
|
||||
flagRO = 3 << 5
|
||||
flagIndir = 1 << 7
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
}
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
return rv
|
||||
}
|
38 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
341 vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisify an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len and values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
306 vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
@ -0,0 +1,306 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisify an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Printf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
211 vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
@ -0,0 +1,211 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types are as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
509 vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
@ -0,0 +1,509 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound == true:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
|
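The following is a brief usage sketch of the three entry points defined in this file (Dump, Fdump, and Sdump); the data value is invented for illustration:

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	data := map[string][]byte{"greeting": []byte("hello")}

	// Dump writes to standard output with types, lengths, and a
	// hexdump -C style rendering of the byte slice.
	spew.Dump(data)

	// Fdump writes the same output to an arbitrary io.Writer.
	spew.Fdump(os.Stderr, data)

	// Sdump returns the output as a string instead of writing it.
	s := spew.Sdump(data)
	_ = s
}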
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
@ -0,0 +1,419 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound == true:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
|
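A short sketch of how NewFormatter integrates with the standard fmt package, as described in the comment above; the pair type is hypothetical:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type pair struct {
	A int
	B *int
}

func main() {
	b := 7
	v := &pair{A: 1, B: &b}

	// Wrapping the value lets fmt drive the spew formatter through
	// the %v, %+v, %#v, and %#+v verb combinations.
	fmt.Printf("%+v\n", spew.NewFormatter(v))

	// Other verbs such as %q are passed through to fmt unchanged.
	fmt.Printf("%q\n", spew.NewFormatter("plain string"))
}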
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
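The wrappers above all funnel through convertArgs; here is a minimal sketch of using two of them (the error message is hypothetical):

package main

import (
	"errors"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cause := errors.New("connection refused")

	// Each argument is wrapped by convertArgs, so cause is rendered
	// by the spew formatter rather than plain fmt.
	err := spew.Errorf("dial failed: %+v", cause)

	// Sprintln returns the formatted arguments as a string.
	line := spew.Sprintln("last error:", err)
	_ = line
}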
182
vendor/github.com/docker/distribution/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
a-palchikov <deemok@gmail.com>
|
||||
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||
Aaron Schlesinger <aschlesinger@deis.com>
|
||||
Aaron Vinson <avinson.public@gmail.com>
|
||||
Adam Duke <adam.v.duke@gmail.com>
|
||||
Adam Enger <adamenger@gmail.com>
|
||||
Adrian Mouat <adrian.mouat@gmail.com>
|
||||
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
|
||||
Alex Chan <alex.chan@metaswitch.com>
|
||||
Alex Elman <aelman@indeed.com>
|
||||
Alexey Gladkov <gladkov.alexey@gmail.com>
|
||||
allencloud <allen.sun@daocloud.io>
|
||||
amitshukla <ashukla73@hotmail.com>
|
||||
Amy Lindburg <amy.lindburg@docker.com>
|
||||
Andrew Hsu <andrewhsu@acm.org>
|
||||
Andrew Meredith <andymeredith@gmail.com>
|
||||
Andrew T Nguyen <andrew.nguyen@docker.com>
|
||||
Andrey Kostov <kostov.andrey@gmail.com>
|
||||
Andy Goldstein <agoldste@redhat.com>
|
||||
Anis Elleuch <vadmeste@gmail.com>
|
||||
Anton Tiurin <noxiouz@yandex.ru>
|
||||
Antonio Mercado <amercado@thinknode.com>
|
||||
Antonio Murdaca <runcom@redhat.com>
|
||||
Anusha Ragunathan <anusha@docker.com>
|
||||
Arien Holthuizen <aholthuizen@schubergphilis.com>
|
||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||
Arthur Baars <arthur@semmle.com>
|
||||
Asuka Suzuki <hello@tanksuzuki.com>
|
||||
Avi Miller <avi.miller@oracle.com>
|
||||
Ayose Cazorla <ayosec@gmail.com>
|
||||
BadZen <dave.trombley@gmail.com>
|
||||
Ben Bodenmiller <bbodenmiller@hotmail.com>
|
||||
Ben Firshman <ben@firshman.co.uk>
|
||||
bin liu <liubin0329@gmail.com>
|
||||
Brian Bland <brian.bland@docker.com>
|
||||
burnettk <burnettk@gmail.com>
|
||||
Carson A <ca@carsonoid.net>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
Charles Smith <charles.smith@docker.com>
|
||||
Chris Dillon <squarism@gmail.com>
|
||||
cuiwei13 <cuiwei13@pku.edu.cn>
|
||||
cyli <cyli@twistedmatrix.com>
|
||||
Daisuke Fujita <dtanshi45@gmail.com>
|
||||
Daniel Huhn <daniel@danielhuhn.de>
|
||||
Darren Shepherd <darren@rancher.com>
|
||||
Dave Trombley <dave.trombley@gmail.com>
|
||||
Dave Tucker <dt@docker.com>
|
||||
David Lawrence <david.lawrence@docker.com>
|
||||
David Verhasselt <david@crowdway.com>
|
||||
David Xia <dxia@spotify.com>
|
||||
davidli <wenquan.li@hp.com>
|
||||
Dejan Golja <dejan@golja.org>
|
||||
Derek McGowan <derek@mcgstyle.net>
|
||||
Diogo Mónica <diogo.monica@gmail.com>
|
||||
DJ Enriquez <dj.enriquez@infospace.com>
|
||||
Donald Huang <don.hcd@gmail.com>
|
||||
Doug Davis <dug@us.ibm.com>
|
||||
Edgar Lee <edgar.lee@docker.com>
|
||||
Eric Yang <windfarer@gmail.com>
|
||||
Fabio Berchtold <jamesclonk@jamesclonk.ch>
|
||||
Fabio Huser <fabio@fh1.ch>
|
||||
farmerworking <farmerworking@gmail.com>
|
||||
Felix Yan <felixonmars@archlinux.org>
|
||||
Florentin Raud <florentin.raud@gmail.com>
|
||||
Frank Chen <frankchn@gmail.com>
|
||||
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
|
||||
gabriell nascimento <gabriell@bluesoft.com.br>
|
||||
Gleb Schukin <gschukin@ptsecurity.com>
|
||||
harche <p.harshal@gmail.com>
|
||||
Henri Gomez <henri.gomez@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Hua Wang <wanghua.humble@gmail.com>
|
||||
HuKeping <hukeping@huawei.com>
|
||||
Ian Babrou <ibobrik@gmail.com>
|
||||
igayoso <igayoso@gmail.com>
|
||||
Jack Griffin <jackpg14@gmail.com>
|
||||
James Findley <jfindley@fastmail.com>
|
||||
Jason Freidman <jason.freidman@gmail.com>
|
||||
Jason Heiss <jheiss@aput.net>
|
||||
Jeff Nickoloff <jeff@allingeek.com>
|
||||
Jess Frazelle <acidburn@google.com>
|
||||
Jessie Frazelle <jessie@docker.com>
|
||||
jhaohai <jhaohai@foxmail.com>
|
||||
Jianqing Wang <tsing@jianqing.org>
|
||||
Jihoon Chung <jihoon@gmail.com>
|
||||
Joao Fernandes <joao.fernandes@docker.com>
|
||||
John Mulhausen <john@docker.com>
|
||||
John Starks <jostarks@microsoft.com>
|
||||
Jon Johnson <jonjohnson@google.com>
|
||||
Jon Poler <jonathan.poler@apcera.com>
|
||||
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||
Jordan Liggitt <jliggitt@redhat.com>
|
||||
Josh Chorlton <josh.chorlton@docker.com>
|
||||
Josh Hawn <josh.hawn@docker.com>
|
||||
Julien Fernandez <julien.fernandez@gmail.com>
|
||||
Ke Xu <leonhartx.k@gmail.com>
|
||||
Keerthan Mala <kmala@engineyard.com>
|
||||
Kelsey Hightower <kelsey.hightower@gmail.com>
|
||||
Kenneth Lim <kennethlimcp@gmail.com>
|
||||
Kenny Leung <kleung@google.com>
|
||||
Li Yi <denverdino@gmail.com>
|
||||
Liu Hua <sdu.liu@huawei.com>
|
||||
liuchang0812 <liuchang0812@gmail.com>
|
||||
Lloyd Ramey <lnr0626@gmail.com>
|
||||
Louis Kottmann <louis.kottmann@gmail.com>
|
||||
Luke Carpenter <x@rubynerd.net>
|
||||
Marcus Martins <marcus@docker.com>
|
||||
Mary Anthony <mary@docker.com>
|
||||
Matt Bentley <mbentley@mbentley.net>
|
||||
Matt Duch <matt@learnmetrics.com>
|
||||
Matt Moore <mattmoor@google.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
Matthew Green <greenmr@live.co.uk>
|
||||
Michael Prokop <mika@grml.org>
|
||||
Michal Minar <miminar@redhat.com>
|
||||
Michal Minář <miminar@redhat.com>
|
||||
Mike Brown <brownwm@us.ibm.com>
|
||||
Miquel Sabaté <msabate@suse.com>
|
||||
Misty Stanley-Jones <misty@apache.org>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
moxiegirl <mary@docker.com>
|
||||
Nathan Sullivan <nathan@nightsys.net>
|
||||
nevermosby <robolwq@qq.com>
|
||||
Nghia Tran <tcnghia@gmail.com>
|
||||
Nikita Tarasov <nikita@mygento.ru>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
|
||||
Oilbeater <liumengxinfly@gmail.com>
|
||||
Olivier Gambier <olivier@docker.com>
|
||||
Olivier Jacques <olivier.jacques@hp.com>
|
||||
Omer Cohen <git@omer.io>
|
||||
Patrick Devine <patrick.devine@docker.com>
|
||||
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||
Philip Misiowiec <philip@atlashealth.com>
|
||||
Pierre-Yves Ritschard <pyr@spootnik.org>
|
||||
Qiao Anran <qiaoanran@gmail.com>
|
||||
Randy Barlow <randy@electronsweatshop.com>
|
||||
Richard Scothern <richard.scothern@docker.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Rusty Conover <rusty@luckydinosaur.com>
|
||||
Sean Boran <Boran@users.noreply.github.com>
|
||||
Sebastiaan van Stijn <github@gone.nl>
|
||||
Sebastien Coavoux <s.coavoux@free.fr>
|
||||
Serge Dubrouski <sergeyfd@gmail.com>
|
||||
Sharif Nassar <sharif@mrwacky.com>
|
||||
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
|
||||
Shreyas Karnik <karnik.shreyas@gmail.com>
|
||||
Simon Thulbourn <simon+github@thulbourn.com>
|
||||
spacexnice <yaoyao.xyy@alibaba-inc.com>
|
||||
Spencer Rinehart <anubis@overthemonkey.com>
|
||||
Stan Hu <stanhu@gmail.com>
|
||||
Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
Stephen J Day <stephen.day@docker.com>
|
||||
Sungho Moon <sungho.moon@navercorp.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
Ted Reed <ted.reed@gmail.com>
|
||||
tgic <farmer1992@gmail.com>
|
||||
Thomas Sjögren <konstruktoid@users.noreply.github.com>
|
||||
Tianon Gravi <admwiggin@gmail.com>
|
||||
Tibor Vass <teabee89@gmail.com>
|
||||
Tonis Tiigi <tonistiigi@gmail.com>
|
||||
Tony Holdstock-Brown <tony@docker.com>
|
||||
Trevor Pounds <trevor.pounds@gmail.com>
|
||||
Troels Thomsen <troels@thomsen.io>
|
||||
Victor Vieux <vieux@docker.com>
|
||||
Victoria Bialas <victoria.bialas@docker.com>
|
||||
Vincent Batts <vbatts@redhat.com>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
Vincent Giersch <vincent.giersch@ovh.net>
|
||||
W. Trevor King <wking@tremily.us>
|
||||
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
|
||||
xg.song <xg.song@venusource.com>
|
||||
xiekeyang <xiekeyang@huawei.com>
|
||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
||||
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
|
||||
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||
yuzou <zouyu7@huawei.com>
|
||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
202
vendor/github.com/docker/distribution/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
139
vendor/github.com/docker/distribution/digest/digest.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
|
||||
DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
|
||||
// Digest allows simple protection of hex formatted digest strings, prefixed
|
||||
// by their algorithm. Strings of type Digest have some guarantee of being in
|
||||
// the correct format and it provides quick access to the components of a
|
||||
// digest string.
|
||||
//
|
||||
// The following is an example of the contents of Digest types:
|
||||
//
|
||||
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
|
||||
//
|
||||
// This allows one to abstract the digest behind this type and work only in those
|
||||
// terms.
|
||||
type Digest string
|
||||
|
||||
// NewDigest returns a Digest from alg and a hash.Hash object.
|
||||
func NewDigest(alg Algorithm, h hash.Hash) Digest {
|
||||
return NewDigestFromBytes(alg, h.Sum(nil))
|
||||
}
|
||||
|
||||
// NewDigestFromBytes returns a new digest from the byte contents of p.
|
||||
// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
|
||||
// functions. This is also useful for rebuilding digests from binary
|
||||
// serializations.
|
||||
func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%x", alg, p))
|
||||
}
|
||||
|
||||
// NewDigestFromHex returns a Digest from alg and a the hex encoded digest.
|
||||
func NewDigestFromHex(alg, hex string) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%s", alg, hex))
|
||||
}
|
||||
|
||||
// DigestRegexp matches valid digest types.
|
||||
var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
|
||||
|
||||
// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
|
||||
var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
|
||||
|
||||
var (
|
||||
// ErrDigestInvalidFormat returned when digest format invalid.
|
||||
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
|
||||
|
||||
// ErrDigestInvalidLength returned when digest has invalid length.
|
||||
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
|
||||
|
||||
// ErrDigestUnsupported returned when the digest algorithm is unsupported.
|
||||
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
|
||||
)
|
||||
|
||||
// ParseDigest parses s and returns the validated digest object. An error will
|
||||
// be returned if the format is invalid.
|
||||
func ParseDigest(s string) (Digest, error) {
|
||||
d := Digest(s)
|
||||
|
||||
return d, d.Validate()
|
||||
}
|
||||
|
||||
// FromReader returns the most valid digest for the underlying content using
|
||||
// the canonical digest algorithm.
|
||||
func FromReader(rd io.Reader) (Digest, error) {
|
||||
return Canonical.FromReader(rd)
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func FromBytes(p []byte) Digest {
|
||||
return Canonical.FromBytes(p)
|
||||
}
|
||||
|
||||
// Validate checks that the contents of d is a valid digest, returning an
|
||||
// error if not.
|
||||
func (d Digest) Validate() error {
|
||||
s := string(d)
|
||||
|
||||
if !DigestRegexpAnchored.MatchString(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
i := strings.Index(s, ":")
|
||||
if i < 0 {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
// case: "sha256:" with no hex.
|
||||
if i+1 == len(s) {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
switch algorithm := Algorithm(s[:i]); algorithm {
|
||||
case SHA256, SHA384, SHA512:
|
||||
if algorithm.Size()*2 != len(s[i+1:]) {
|
||||
return ErrDigestInvalidLength
|
||||
}
|
||||
break
|
||||
default:
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Algorithm returns the algorithm portion of the digest. This will panic if
|
||||
// the underlying digest is not in a valid format.
|
||||
func (d Digest) Algorithm() Algorithm {
|
||||
return Algorithm(d[:d.sepIndex()])
|
||||
}
|
||||
|
||||
// Hex returns the hex digest portion of the digest. This will panic if the
|
||||
// underlying digest is not in a valid format.
|
||||
func (d Digest) Hex() string {
|
||||
return string(d[d.sepIndex()+1:])
|
||||
}
|
||||
|
||||
func (d Digest) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
func (d Digest) sepIndex() int {
|
||||
i := strings.Index(string(d), ":")
|
||||
|
||||
if i < 0 {
|
||||
panic("could not find ':' in digest: " + d)
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
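The digest.go API above can be exercised end to end with a short sketch. This is not part of the vendored file; it is a minimal illustration, assuming the vendored import path github.com/docker/distribution/digest and the FromBytes/ParseDigest helpers shown above.

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// FromBytes uses the canonical algorithm (sha256) and never fails.
	d := digest.FromBytes([]byte("hello world"))
	fmt.Println(d.Algorithm(), d.Hex())

	// ParseDigest round-trips the "<algorithm>:<hex>" string form and
	// validates both the algorithm and the hex length.
	parsed, err := digest.ParseDigest(d.String())
	if err != nil {
		fmt.Println("invalid digest:", err)
		return
	}
	fmt.Println(parsed == d) // comparisons are plain string equality
}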
155 vendor/github.com/docker/distribution/digest/digester.go generated vendored Normal file
@ -0,0 +1,155 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Algorithm identifies and implementation of a digester by an identifier.
|
||||
// Note the that this defines both the hash algorithm used and the string
|
||||
// encoding.
|
||||
type Algorithm string
|
||||
|
||||
// supported digest types
|
||||
const (
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding
|
||||
|
||||
// Canonical is the primary digest algorithm used with the distribution
|
||||
// project. Other digests may be used but this one is the primary storage
|
||||
// digest.
|
||||
Canonical = SHA256
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(stevvooe): Follow the pattern of the standard crypto package for
|
||||
// registration of digests. Effectively, we are a registerable set and
|
||||
// common symbol access.
|
||||
|
||||
// algorithms maps values to hash.Hash implementations. Other algorithms
|
||||
// may be available but they cannot be calculated by the digest package.
|
||||
algorithms = map[Algorithm]crypto.Hash{
|
||||
SHA256: crypto.SHA256,
|
||||
SHA384: crypto.SHA384,
|
||||
SHA512: crypto.SHA512,
|
||||
}
|
||||
)
|
||||
|
||||
// Available returns true if the digest type is available for use. If this
|
||||
// returns false, New and Hash will return nil.
|
||||
func (a Algorithm) Available() bool {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// check availability of the hash, as well
|
||||
return h.Available()
|
||||
}
|
||||
|
||||
func (a Algorithm) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// Size returns number of bytes returned by the hash.
|
||||
func (a Algorithm) Size() int {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
return h.Size()
|
||||
}
|
||||
|
||||
// Set implemented to allow use of Algorithm as a command line flag.
|
||||
func (a *Algorithm) Set(value string) error {
|
||||
if value == "" {
|
||||
*a = Canonical
|
||||
} else {
|
||||
// just do a type conversion, support is queried with Available.
|
||||
*a = Algorithm(value)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New returns a new digester for the specified algorithm. If the algorithm
|
||||
// does not have a digester implementation, nil will be returned. This can be
|
||||
// checked by calling Available before calling New.
|
||||
func (a Algorithm) New() Digester {
|
||||
return &digester{
|
||||
alg: a,
|
||||
hash: a.Hash(),
|
||||
}
|
||||
}
|
||||
|
||||
// Hash returns a new hash as used by the algorithm. If not available, the
|
||||
// method will panic. Check Algorithm.Available() before calling.
|
||||
func (a Algorithm) Hash() hash.Hash {
|
||||
if !a.Available() {
|
||||
// NOTE(stevvooe): A missing hash is usually a programming error that
|
||||
// must be resolved at compile time. We don't import in the digest
|
||||
// package to allow users to choose their hash implementation (such as
|
||||
// when using stevvooe/resumable or a hardware accelerated package).
|
||||
//
|
||||
// Applications that may want to resolve the hash at runtime should
|
||||
// call Algorithm.Available before call Algorithm.Hash().
|
||||
panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
|
||||
}
|
||||
|
||||
return algorithms[a].New()
|
||||
}
|
||||
|
||||
// FromReader returns the digest of the reader using the algorithm.
|
||||
func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
|
||||
digester := a.New()
|
||||
|
||||
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return digester.Digest(), nil
|
||||
}
|
||||
|
||||
// FromBytes digests the input and returns a Digest.
|
||||
func (a Algorithm) FromBytes(p []byte) Digest {
|
||||
digester := a.New()
|
||||
|
||||
if _, err := digester.Hash().Write(p); err != nil {
|
||||
// Writes to a Hash should never fail. None of the existing
|
||||
// hash implementations in the stdlib or hashes vendored
|
||||
// here can return errors from Write. Having a panic in this
|
||||
// condition instead of having FromBytes return an error value
|
||||
// avoids unnecessary error handling paths in all callers.
|
||||
panic("write to hash function returned error: " + err.Error())
|
||||
}
|
||||
|
||||
return digester.Digest()
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Allow resolution of verifiers using the digest type and
|
||||
// this registration system.
|
||||
|
||||
// Digester calculates the digest of written data. Writes should go directly
|
||||
// to the return value of Hash, while calling Digest will return the current
|
||||
// value of the digest.
|
||||
type Digester interface {
|
||||
Hash() hash.Hash // provides direct access to underlying hash instance.
|
||||
Digest() Digest
|
||||
}
|
||||
|
||||
// digester provides a simple digester definition that embeds a hasher.
|
||||
type digester struct {
|
||||
alg Algorithm
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (d *digester) Hash() hash.Hash {
|
||||
return d.hash
|
||||
}
|
||||
|
||||
func (d *digester) Digest() Digest {
|
||||
return NewDigest(d.alg, d.hash)
|
||||
}
|
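For streaming input, the Algorithm and Digester types above support incremental hashing. A minimal sketch, not part of the vendored file, assuming the same vendored import path:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// Canonical.New() returns a Digester wrapping a sha256 hash.Hash.
	dgstr := digest.Canonical.New()

	// Stream the payload into the underlying hash; any io.Reader works.
	if _, err := io.Copy(dgstr.Hash(), strings.NewReader("some payload")); err != nil {
		fmt.Println("copy failed:", err)
		return
	}

	// Digest() reports the digest of everything written so far.
	fmt.Println(dgstr.Digest())
}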
42 vendor/github.com/docker/distribution/digest/doc.go generated vendored Normal file
@ -0,0 +1,42 @@
// Package digest provides a generalized type to opaquely represent message
// digests and their operations within the registry. The Digest type is
// designed to serve as a flexible identifier in a content-addressable system.
// More importantly, it provides tools and wrappers to work with
// hash.Hash-based digests with little effort.
//
// Basics
//
// The format of a digest is simply a string with two parts, dubbed the
// "algorithm" and the "digest", separated by a colon:
//
//	<algorithm>:<digest>
//
// An example of a sha256 digest representation follows:
//
//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// In this case, the string "sha256" is the algorithm and the hex bytes are
// the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the
// standard equality operator.
//
// Verification
//
// The main benefit of using the Digest type is simple verification against a
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
// interface, provides a common write sink for digest verification. After
// writing is complete, calling the Verifier.Verified method will indicate
// whether or not the stream of bytes matches the target digest.
//
// Missing Features
//
// In addition to the above, we intend to add the following features to this
// package:
//
// 1. A Digester type that supports write sink digest calculation.
//
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
//
package digest
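Because Digest is a string type, the cheap-comparison behaviour described in the package comment falls out of ordinary Go semantics. A small illustration, not part of the vendored file, using the DigestSha256EmptyTar constant defined in digest.go above:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Digests compare and hash like plain strings, so they work directly
	// as map keys and with the == operator.
	seen := map[digest.Digest]bool{
		digest.DigestSha256EmptyTar: true,
	}

	// The digest of empty input equals the DigestSha256EmptyTar constant.
	fmt.Println(seen[digest.FromBytes(nil)]) // true
}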
245 vendor/github.com/docker/distribution/digest/set.go generated vendored Normal file
@ -0,0 +1,245 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are omitted from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
mutex sync.RWMutex
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (Digest, error) {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := ParseDigest(d)
|
||||
if err == ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digest to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// set, this operation will be a no-op.
|
||||
func (dst *Set) Add(d Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes the given digest from the set. An err will be
|
||||
// returned if the given digest is invalid. If the digest does
|
||||
// not exist in the set, this operation will be a no-op.
|
||||
func (dst *Set) Remove(d Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
dst.mutex.Lock()
|
||||
defer dst.mutex.Unlock()
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
// Not found if idx is after or value at idx is not digest
|
||||
if idx == len(dst.entries) || dst.entries[idx].digest != d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := dst.entries
|
||||
copy(entries[idx:], entries[idx+1:])
|
||||
entries = entries[:len(entries)-1]
|
||||
dst.entries = entries
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []Digest {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
retValues := make([]Digest, len(dst.entries))
|
||||
for i := range dst.entries {
|
||||
retValues[i] = dst.entries[i].digest
|
||||
}
|
||||
|
||||
return retValues
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[Digest]string {
|
||||
dst.mutex.RLock()
|
||||
defer dst.mutex.RUnlock()
|
||||
m := make(map[Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg Algorithm
|
||||
val string
|
||||
digest Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
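The Set above is what lets callers resolve abbreviated digests unambiguously. A minimal sketch of Add/Lookup and ShortCodeTable, not part of the vendored file; the prefix length 12 and minimum short-code length 6 are arbitrary example values:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	set := digest.NewSet()
	d1 := digest.FromBytes([]byte("layer-1"))
	d2 := digest.FromBytes([]byte("layer-2"))
	if err := set.Add(d1); err != nil {
		fmt.Println("add failed:", err)
		return
	}
	if err := set.Add(d2); err != nil {
		fmt.Println("add failed:", err)
		return
	}

	// Lookup accepts a full digest string or a hex prefix; it returns
	// ErrDigestAmbiguous when the prefix matches more than one entry.
	found, err := set.Lookup(d1.Hex()[:12])
	fmt.Println(found == d1, err)

	// ShortCodeTable picks the shortest unique prefix of at least 6 chars.
	for dgst, short := range digest.ShortCodeTable(set, 6) {
		fmt.Println(short, "->", dgst)
	}
}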
44 vendor/github.com/docker/distribution/digest/verifiers.go generated vendored Normal file
@ -0,0 +1,44 @@
package digest

import (
	"hash"
	"io"
)

// Verifier presents a general verification interface to be used with message
// digests and other byte stream verifications. Users instantiate a Verifier
// from one of the various methods, write the data under test to it then check
// the result with the Verified method.
type Verifier interface {
	io.Writer

	// Verified will return true if the content written to Verifier matches
	// the digest.
	Verified() bool
}

// NewDigestVerifier returns a verifier that compares the written bytes
// against a passed in digest.
func NewDigestVerifier(d Digest) (Verifier, error) {
	if err := d.Validate(); err != nil {
		return nil, err
	}

	return hashVerifier{
		hash:   d.Algorithm().Hash(),
		digest: d,
	}, nil
}

type hashVerifier struct {
	digest Digest
	hash   hash.Hash
}

func (hv hashVerifier) Write(p []byte) (n int, err error) {
	return hv.hash.Write(p)
}

func (hv hashVerifier) Verified() bool {
	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
}
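The Verifier above is the write-sink verification path mentioned in the package documentation. A minimal sketch, not part of the vendored file:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("content to be checked")
	expected := digest.FromBytes(payload)

	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		fmt.Println("bad digest:", err)
		return
	}

	// Copy the stream under test into the verifier, then ask whether the
	// bytes written match the expected digest.
	if _, err := io.Copy(verifier, bytes.NewReader(payload)); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(verifier.Verified()) // true when the content matches
}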
370 vendor/github.com/docker/distribution/reference/reference.go generated vendored Normal file
@ -0,0 +1,370 @@
|
||||
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [hostname '/'] component ['/' component]*
|
||||
// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
|
||||
// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
package reference
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||
NameTotalLengthMax = 255
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
|
||||
ErrReferenceInvalidFormat = errors.New("invalid reference format")
|
||||
|
||||
// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrTagInvalidFormat = errors.New("invalid tag format")
|
||||
|
||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
|
||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||
|
||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
||||
|
||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||
|
||||
// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
|
||||
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
|
||||
)
|
||||
|
||||
// Reference is an opaque object reference identifier that may include
|
||||
// modifiers such as a hostname, name, tag, and digest.
|
||||
type Reference interface {
|
||||
// String returns the full reference
|
||||
String() string
|
||||
}
|
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
|
||||
// working with encoding.
|
||||
type Field struct {
|
||||
reference Reference
|
||||
}
|
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field {
|
||||
return Field{reference}
|
||||
}
|
||||
|
||||
// Reference unwraps the reference type from the field to
|
||||
// return the Reference object. This object should be
|
||||
// of the appropriate type to further check for different
|
||||
// reference types.
|
||||
func (f Field) Reference() Reference {
|
||||
return f.reference
|
||||
}
|
||||
|
||||
// MarshalText serializes the field to byte text which
|
||||
// is the string of the reference.
|
||||
func (f Field) MarshalText() (p []byte, err error) {
|
||||
return []byte(f.reference.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error {
|
||||
r, err := Parse(string(p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reference = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Named is an object with a full name
|
||||
type Named interface {
|
||||
Reference
|
||||
Name() string
|
||||
}
|
||||
|
||||
// Tagged is an object which has a tag
|
||||
type Tagged interface {
|
||||
Reference
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// NamedTagged is an object including a name and tag.
|
||||
type NamedTagged interface {
|
||||
Named
|
||||
Tag() string
|
||||
}
|
||||
|
||||
// Digested is an object which has a digest
|
||||
// in which it can be referenced by
|
||||
type Digested interface {
|
||||
Reference
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// Canonical reference is an object with a fully unique
|
||||
// name including a name with hostname and digest
|
||||
type Canonical interface {
|
||||
Named
|
||||
Digest() digest.Digest
|
||||
}
|
||||
|
||||
// SplitHostname splits a named reference into a
|
||||
// hostname and name string. If no valid hostname is
|
||||
// found, the hostname is empty and the full value
|
||||
// is returned as name
|
||||
func SplitHostname(named Named) (string, string) {
|
||||
name := named.Name()
|
||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||
if len(match) != 3 {
|
||||
return "", name
|
||||
}
|
||||
return match[1], match[2]
|
||||
}
|
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: Parse will not handle short digests.
|
||||
func Parse(s string) (Reference, error) {
|
||||
matches := ReferenceRegexp.FindStringSubmatch(s)
|
||||
if matches == nil {
|
||||
if s == "" {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
|
||||
return nil, ErrNameContainsUppercase
|
||||
}
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
|
||||
if len(matches[1]) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
|
||||
ref := reference{
|
||||
name: matches[1],
|
||||
tag: matches[2],
|
||||
}
|
||||
if matches[3] != "" {
|
||||
var err error
|
||||
ref.digest, err = digest.ParseDigest(matches[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
r := getBestReferenceType(ref)
|
||||
if r == nil {
|
||||
return nil, ErrNameEmpty
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||
// the Named interface. The reference must have a name, otherwise an error is
|
||||
// returned.
|
||||
// If an error was encountered it is returned, along with a nil Reference.
|
||||
// NOTE: ParseNamed will not handle short digests.
|
||||
func ParseNamed(s string) (Named, error) {
|
||||
ref, err := Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
named, isNamed := ref.(Named)
|
||||
if !isNamed {
|
||||
return nil, fmt.Errorf("reference %s has no name", ref.String())
|
||||
}
|
||||
return named, nil
|
||||
}
|
||||
|
||||
// WithName returns a named object representing the given string. If the input
|
||||
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||
func WithName(name string) (Named, error) {
|
||||
if len(name) > NameTotalLengthMax {
|
||||
return nil, ErrNameTooLong
|
||||
}
|
||||
if !anchoredNameRegexp.MatchString(name) {
|
||||
return nil, ErrReferenceInvalidFormat
|
||||
}
|
||||
return repository(name), nil
|
||||
}
|
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||
// reference incorporating both the name and the tag.
|
||||
func WithTag(name Named, tag string) (NamedTagged, error) {
|
||||
if !anchoredTagRegexp.MatchString(tag) {
|
||||
return nil, ErrTagInvalidFormat
|
||||
}
|
||||
if canonical, ok := name.(Canonical); ok {
|
||||
return reference{
|
||||
name: name.Name(),
|
||||
tag: tag,
|
||||
digest: canonical.Digest(),
|
||||
}, nil
|
||||
}
|
||||
return taggedReference{
|
||||
name: name.Name(),
|
||||
tag: tag,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||
// a reference incorporating both the name and the digest.
|
||||
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||
if !anchoredDigestRegexp.MatchString(digest.String()) {
|
||||
return nil, ErrDigestInvalidFormat
|
||||
}
|
||||
if tagged, ok := name.(Tagged); ok {
|
||||
return reference{
|
||||
name: name.Name(),
|
||||
tag: tagged.Tag(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
return canonicalReference{
|
||||
name: name.Name(),
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Match reports whether ref matches the specified pattern.
|
||||
// See https://godoc.org/path#Match for supported patterns.
|
||||
func Match(pattern string, ref Reference) (bool, error) {
|
||||
matched, err := path.Match(pattern, ref.String())
|
||||
if namedRef, isNamed := ref.(Named); isNamed && !matched {
|
||||
matched, _ = path.Match(pattern, namedRef.Name())
|
||||
}
|
||||
return matched, err
|
||||
}
|
||||
|
||||
// TrimNamed removes any tag or digest from the named reference.
|
||||
func TrimNamed(ref Named) Named {
|
||||
return repository(ref.Name())
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
if ref.name == "" {
|
||||
// Allow digest only references
|
||||
if ref.digest != "" {
|
||||
return digestReference(ref.digest)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ref.tag == "" {
|
||||
if ref.digest != "" {
|
||||
return canonicalReference{
|
||||
name: ref.name,
|
||||
digest: ref.digest,
|
||||
}
|
||||
}
|
||||
return repository(ref.name)
|
||||
}
|
||||
if ref.digest == "" {
|
||||
return taggedReference{
|
||||
name: ref.name,
|
||||
tag: ref.tag,
|
||||
}
|
||||
}
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
type reference struct {
|
||||
name string
|
||||
tag string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (r reference) String() string {
|
||||
return r.name + ":" + r.tag + "@" + r.digest.String()
|
||||
}
|
||||
|
||||
func (r reference) Name() string {
|
||||
return r.name
|
||||
}
|
||||
|
||||
func (r reference) Tag() string {
|
||||
return r.tag
|
||||
}
|
||||
|
||||
func (r reference) Digest() digest.Digest {
|
||||
return r.digest
|
||||
}
|
||||
|
||||
type repository string
|
||||
|
||||
func (r repository) String() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
func (r repository) Name() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
type digestReference digest.Digest
|
||||
|
||||
func (d digestReference) String() string {
|
||||
return d.String()
|
||||
}
|
||||
|
||||
func (d digestReference) Digest() digest.Digest {
|
||||
return digest.Digest(d)
|
||||
}
|
||||
|
||||
type taggedReference struct {
|
||||
name string
|
||||
tag string
|
||||
}
|
||||
|
||||
func (t taggedReference) String() string {
|
||||
return t.name + ":" + t.tag
|
||||
}
|
||||
|
||||
func (t taggedReference) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
func (t taggedReference) Tag() string {
|
||||
return t.tag
|
||||
}
|
||||
|
||||
type canonicalReference struct {
|
||||
name string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func (c canonicalReference) String() string {
|
||||
return c.name + "@" + c.digest.String()
|
||||
}
|
||||
|
||||
func (c canonicalReference) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
func (c canonicalReference) Digest() digest.Digest {
|
||||
return c.digest
|
||||
}
|
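A short sketch of how the reference grammar above is consumed in practice. This is not part of the vendored file; the registry host and repository names are made-up examples, and the import path assumes the vendored location github.com/docker/distribution/reference:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a name with a registry host, port, repository path and tag.
	named, err := reference.ParseNamed("registry.example.com:5000/project/image:1.0")
	if err != nil {
		fmt.Println("invalid reference:", err)
		return
	}

	hostname, remainder := reference.SplitHostname(named)
	fmt.Println(hostname)  // registry.example.com:5000
	fmt.Println(remainder) // project/image

	if tagged, ok := named.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // 1.0
	}

	// TrimNamed drops the tag/digest, leaving only the repository name.
	fmt.Println(reference.TrimNamed(named).Name())
}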
124 vendor/github.com/docker/distribution/reference/regexp.go generated vendored Normal file
@ -0,0 +1,124 @@
|
||||
package reference
|
||||
|
||||
import "regexp"
|
||||
|
||||
var (
|
||||
// alphaNumericRegexp defines the alpha numeric atom, typically a
|
||||
// component of names. This only allows lower case characters and digits.
|
||||
alphaNumericRegexp = match(`[a-z0-9]+`)
|
||||
|
||||
// separatorRegexp defines the separators allowed to be embedded in name
|
||||
// components. This allow one period, one or two underscore and multiple
|
||||
// dashes.
|
||||
separatorRegexp = match(`(?:[._]|__|[-]*)`)
|
||||
|
||||
// nameComponentRegexp restricts registry path component names to start
|
||||
// with at least one letter or number, with following parts able to be
|
||||
// separated by one period, one or two underscore and multiple dashes.
|
||||
nameComponentRegexp = expression(
|
||||
alphaNumericRegexp,
|
||||
optional(repeated(separatorRegexp, alphaNumericRegexp)))
|
||||
|
||||
// hostnameComponentRegexp restricts the registry hostname component of a
|
||||
// repository name to start with a component as defined by hostnameRegexp
|
||||
// and followed by an optional port.
|
||||
hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
|
||||
|
||||
// hostnameRegexp defines the structure of potential hostname components
|
||||
// that may be part of image names. This is purposely a subset of what is
|
||||
// allowed by DNS to ensure backwards compatibility with Docker image
|
||||
// names.
|
||||
hostnameRegexp = expression(
|
||||
hostnameComponentRegexp,
|
||||
optional(repeated(literal(`.`), hostnameComponentRegexp)),
|
||||
optional(literal(`:`), match(`[0-9]+`)))
|
||||
|
||||
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
|
||||
TagRegexp = match(`[\w][\w.-]{0,127}`)
|
||||
|
||||
// anchoredTagRegexp matches valid tag names, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredTagRegexp = anchored(TagRegexp)
|
||||
|
||||
// DigestRegexp matches valid digests.
|
||||
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
|
||||
|
||||
// anchoredDigestRegexp matches valid digests, anchored at the start and
|
||||
// end of the matched string.
|
||||
anchoredDigestRegexp = anchored(DigestRegexp)
|
||||
|
||||
// NameRegexp is the format for the name component of references. The
|
||||
// regexp has capturing groups for the hostname and name part omitting
|
||||
// the separating forward slash from either.
|
||||
NameRegexp = expression(
|
||||
optional(hostnameRegexp, literal(`/`)),
|
||||
nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp)))
|
||||
|
||||
// anchoredNameRegexp is used to parse a name value, capturing the
|
||||
// hostname and trailing components.
|
||||
anchoredNameRegexp = anchored(
|
||||
optional(capture(hostnameRegexp), literal(`/`)),
|
||||
capture(nameComponentRegexp,
|
||||
optional(repeated(literal(`/`), nameComponentRegexp))))
|
||||
|
||||
// ReferenceRegexp is the full supported format of a reference. The regexp
|
||||
// is anchored and has capturing groups for name, tag, and digest
|
||||
// components.
|
||||
ReferenceRegexp = anchored(capture(NameRegexp),
|
||||
optional(literal(":"), capture(TagRegexp)),
|
||||
optional(literal("@"), capture(DigestRegexp)))
|
||||
)
|
||||
|
||||
// match compiles the string to a regular expression.
|
||||
var match = regexp.MustCompile
|
||||
|
||||
// literal compiles s into a literal regular expression, escaping any regexp
|
||||
// reserved characters.
|
||||
func literal(s string) *regexp.Regexp {
|
||||
re := match(regexp.QuoteMeta(s))
|
||||
|
||||
if _, complete := re.LiteralPrefix(); !complete {
|
||||
panic("must be a literal")
|
||||
}
|
||||
|
||||
return re
|
||||
}
|
||||
|
||||
// expression defines a full expression, where each regular expression must
|
||||
// follow the previous.
|
||||
func expression(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
var s string
|
||||
for _, re := range res {
|
||||
s += re.String()
|
||||
}
|
||||
|
||||
return match(s)
|
||||
}
|
||||
|
||||
// optional wraps the expression in a non-capturing group and makes the
|
||||
// production optional.
|
||||
func optional(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `?`)
|
||||
}
|
||||
|
||||
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||
// matches.
|
||||
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(group(expression(res...)).String() + `+`)
|
||||
}
|
||||
|
||||
// group wraps the regexp in a non-capturing group.
|
||||
func group(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(?:` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// capture wraps the expression in a capturing group.
|
||||
func capture(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`(` + expression(res...).String() + `)`)
|
||||
}
|
||||
|
||||
// anchored anchors the regular expression by adding start and end delimiters.
|
||||
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
|
||||
return match(`^` + expression(res...).String() + `$`)
|
||||
}
|
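The exported expressions above can also be used directly when only the raw capture groups are needed, without going through Parse. A small sketch, not part of the vendored file; the image name is an arbitrary example:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ReferenceRegexp captures, in order: name, tag, digest.
	m := reference.ReferenceRegexp.FindStringSubmatch("myregistry.local/team/app:2.3")
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("name:", m[1])   // myregistry.local/team/app
	fmt.Println("tag:", m[2])    // 2.3
	fmt.Println("digest:", m[3]) // empty: no digest in this reference
}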
4 vendor/github.com/emicklei/go-restful-swagger12/.travis.yml generated vendored Normal file
@ -0,0 +1,4 @@
language: go

go:
  - 1.x
46 vendor/github.com/emicklei/go-restful-swagger12/CHANGES.md generated vendored Normal file
@ -0,0 +1,46 @@
Change history of swagger
=
2017-01-30
- moved from go-restful/swagger to go-restful-swagger12

2015-10-16
- add type override mechanism for swagger models (MR 254, nathanejohnson)
- replace uses of wildcard in generated apidocs (issue 251)

2015-05-25
- (api break) changed the type of Properties in Model
- (api break) changed the type of Models in ApiDeclaration
- (api break) changed the parameter type of PostBuildDeclarationMapFunc

2015-04-09
- add ModelBuildable interface for customization of Model

2015-03-17
- preserve order of Routes per WebService in Swagger listing
- fix use of $ref and type in Swagger models
- add api version to listing

2014-11-14
- operation parameters are now sorted using ordering path,query,form,header,body

2014-11-12
- respect omitempty tag value for embedded structs
- expose ApiVersion of WebService to Swagger ApiDeclaration

2014-05-29
- (api add) Ability to define custom http.Handler to serve swagger-ui static files

2014-05-04
- (fix) include model for array element type of response

2014-01-03
- (fix) do not add primitive type to the Api models

2013-11-27
- (fix) make Swagger work for WebServices with root ("/" or "") paths

2013-10-29
- (api add) package variable LogInfo to customize logging function

2013-10-15
- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)
22 vendor/github.com/emicklei/go-restful-swagger12/LICENSE generated vendored Normal file
@ -0,0 +1,22 @@
Copyright (c) 2017 Ernest Micklei

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
83 vendor/github.com/emicklei/go-restful-swagger12/README.md generated vendored Normal file
@ -0,0 +1,83 @@
# go-restful-swagger12

[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)

How to use Swagger UI with go-restful
=

Get the Swagger UI sources (version 1.2 only)

	git clone https://github.com/wordnik/swagger-ui.git

The project contains a "dist" folder.
Its contents has all the Swagger UI files you need.

The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json`

Now, you can install the Swagger WebService for serving the Swagger specification in JSON.

	config := swagger.Config{
		WebServices:     restful.RegisteredWebServices(),
		ApiPath:         "/apidocs.json",
		SwaggerPath:     "/apidocs/",
		SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
	swagger.InstallSwaggerService(config)


Documenting Structs
--

Currently there are 2 ways to document your structs in the go-restful Swagger.

###### By using struct tags
- Use tag "description" to annotate a struct field with a description to show in the UI
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.

###### By using the SwaggerDoc method
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.

	type Address struct {
		Country  string `json:"country,omitempty"`
		PostCode int    `json:"postcode,omitempty"`
	}

	func (Address) SwaggerDoc() map[string]string {
		return map[string]string{
			"":         "Address doc",
			"country":  "Country doc",
			"postcode": "PostCode doc",
		}
	}

This example will generate a JSON like this

	{
		"Address": {
			"id": "Address",
			"description": "Address doc",
			"properties": {
				"country": {
					"type": "string",
					"description": "Country doc"
				},
				"postcode": {
					"type": "integer",
					"format": "int32",
					"description": "PostCode doc"
				}
			}
		}
	}

**Very Important Notes:**
- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address))
- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`)

Notes
--
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.

© 2017, ernestmicklei.com. MIT License. Contributions welcome.
64 vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go generated vendored Normal file
@ -0,0 +1,64 @@
package swagger

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"encoding/json"
)

// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
	List []ApiDeclaration
}

// At returns the ApiDeclaration by its path unless absent, then ok is false
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
	for _, each := range l.List {
		if each.ResourcePath == path {
			return each, true
		}
	}
	return a, false
}

// Put adds or replaces a ApiDeclaration with this name
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
	// maybe replace existing
	for i, each := range l.List {
		if each.ResourcePath == path {
			// replace
			l.List[i] = a
			return
		}
	}
	// add
	l.List = append(l.List, a)
}

// Do enumerates all the properties, each with its assigned name
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
	for _, each := range l.List {
		block(each.ResourcePath, each)
	}
}

// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.ResourcePath)
		buf.WriteString("\": ")
		encoder.Encode(each)
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
46 vendor/github.com/emicklei/go-restful-swagger12/config.go generated vendored Normal file
@ -0,0 +1,46 @@
package swagger

import (
	"net/http"
	"reflect"

	"github.com/emicklei/go-restful"
)

// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)

// MapSchemaFormatFunc can be used to modify typeName at definition time.
type MapSchemaFormatFunc func(typeName string) string

// MapModelTypeNameFunc can be used to return the desired typeName for a given
// type. It will return false if the default name should be used.
type MapModelTypeNameFunc func(t reflect.Type) (string, bool)

type Config struct {
	// url where the services are available, e.g. http://localhost:8080
	// if left empty then the basePath of Swagger is taken from the actual request
	WebServicesUrl string
	// path where the JSON api is avaiable , e.g. /apidocs
	ApiPath string
	// [optional] path where the swagger UI will be served, e.g. /swagger
	SwaggerPath string
	// [optional] location of folder containing Swagger HTML5 application index.html
	SwaggerFilePath string
	// api listing is constructed from this list of restful WebServices.
	WebServices []*restful.WebService
	// will serve all static content (scripts,pages,images)
	StaticHandler http.Handler
	// [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled.
	DisableCORS bool
	// Top-level API version. Is reflected in the resource listing.
	ApiVersion string
	// If set then call this handler after building the complete ApiDeclaration Map
	PostBuildHandler PostBuildDeclarationMapFunc
	// Swagger global info struct
	Info Info
	// [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field conversion.
	SchemaFormatHandler MapSchemaFormatFunc
	// [optional] If set, model builder should call this handler to retrieve the name for a given type.
	ModelTypeNameHandler MapModelTypeNameFunc
}
467 vendor/github.com/emicklei/go-restful-swagger12/model_builder.go generated vendored Normal file
@ -0,0 +1,467 @@
|
||||
package swagger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ModelBuildable is used for extending Structs that need more control over
|
||||
// how the Model appears in the Swagger api declaration.
|
||||
type ModelBuildable interface {
|
||||
PostBuildModel(m *Model) *Model
|
||||
}
|
||||
|
||||
type modelBuilder struct {
|
||||
Models *ModelList
|
||||
Config *Config
|
||||
}
|
||||
|
||||
type documentable interface {
|
||||
SwaggerDoc() map[string]string
|
||||
}
|
||||
|
||||
// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
|
||||
// If it exists, retrive the documentation and overwrite all struct tag descriptions
|
||||
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
|
||||
if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
|
||||
return docable.SwaggerDoc()
|
||||
}
|
||||
return make(map[string]string)
|
||||
}
|
||||
|
||||
// addModelFrom creates and adds a Model to the builder and detects and calls
|
||||
// the post build hook for customizations
|
||||
func (b modelBuilder) addModelFrom(sample interface{}) {
|
||||
if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
|
||||
// allow customizations
|
||||
if buildable, ok := sample.(ModelBuildable); ok {
|
||||
modelOrNil = buildable.PostBuildModel(modelOrNil)
|
||||
b.Models.Put(modelOrNil.Id, *modelOrNil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
|
||||
// Turn pointers into simpler types so further checks are
|
||||
// correct.
|
||||
if st.Kind() == reflect.Ptr {
|
||||
st = st.Elem()
|
||||
}
|
||||
|
||||
modelName := b.keyFrom(st)
|
||||
if nameOverride != "" {
|
||||
modelName = nameOverride
|
||||
}
|
||||
// no models needed for primitive types
|
||||
if b.isPrimitiveType(modelName) {
|
||||
return nil
|
||||
}
|
||||
// golang encoding/json packages says array and slice values encode as
|
||||
// JSON arrays, except that []byte encodes as a base64-encoded string.
|
||||
// If we see a []byte here, treat it at as a primitive type (string)
|
||||
// and deal with it in buildArrayTypeProperty.
|
||||
if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
|
||||
st.Elem().Kind() == reflect.Uint8 {
|
||||
return nil
|
||||
}
|
||||
// see if we already have visited this model
|
||||
if _, ok := b.Models.At(modelName); ok {
|
||||
return nil
|
||||
}
|
||||
sm := Model{
|
||||
Id: modelName,
|
||||
Required: []string{},
|
||||
Properties: ModelPropertyList{}}
|
||||
|
||||
// reference the model before further initializing (enables recursive structs)
|
||||
b.Models.Put(modelName, sm)
|
||||
|
||||
// check for slice or array
|
||||
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
|
||||
b.addModel(st.Elem(), "")
|
||||
return &sm
|
||||
}
|
||||
// check for structure or primitive type
|
||||
if st.Kind() != reflect.Struct {
|
||||
return &sm
|
||||
}
|
||||
|
||||
fullDoc := getDocFromMethodSwaggerDoc2(st)
|
||||
modelDescriptions := []string{}
|
||||
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
field := st.Field(i)
|
||||
jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
|
||||
if len(modelDescription) > 0 {
|
||||
modelDescriptions = append(modelDescriptions, modelDescription)
|
||||
}
|
||||
|
||||
// add if not omitted
|
||||
if len(jsonName) != 0 {
|
||||
// update description
|
||||
if fieldDoc, ok := fullDoc[jsonName]; ok {
|
||||
prop.Description = fieldDoc
|
||||
}
|
||||
// update Required
|
||||
if b.isPropertyRequired(field) {
|
||||
sm.Required = append(sm.Required, jsonName)
|
||||
}
|
||||
sm.Properties.Put(jsonName, prop)
|
||||
}
|
||||
}
|
||||
|
||||
// We always overwrite documentation if SwaggerDoc method exists
|
||||
// "" is special for documenting the struct itself
|
||||
if modelDoc, ok := fullDoc[""]; ok {
|
||||
sm.Description = modelDoc
|
||||
} else if len(modelDescriptions) != 0 {
|
||||
sm.Description = strings.Join(modelDescriptions, "\n")
|
||||
}
|
||||
|
||||
// update model builder with completed model
|
||||
b.Models.Put(modelName, sm)
|
||||
|
||||
return &sm
|
||||
}
|
||||
|
||||
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
|
||||
required := true
|
||||
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||
s := strings.Split(jsonTag, ",")
|
||||
if len(s) > 1 && s[1] == "omitempty" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return required
|
||||
}
|
||||
|
||||
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
|
||||
jsonName = b.jsonNameOfField(field)
|
||||
if len(jsonName) == 0 {
|
||||
// empty name signals skip property
|
||||
return "", "", prop
|
||||
}
|
||||
|
||||
if field.Name == "XMLName" && field.Type.String() == "xml.Name" {
|
||||
// property is metadata for the xml.Name attribute, can be skipped
|
||||
return "", "", prop
|
||||
}
|
||||
|
||||
if tag := field.Tag.Get("modelDescription"); tag != "" {
|
||||
modelDescription = tag
|
||||
}
|
||||
|
||||
prop.setPropertyMetadata(field)
|
||||
if prop.Type != nil {
|
||||
return jsonName, modelDescription, prop
|
||||
}
|
||||
fieldType := field.Type
|
||||
|
||||
// check if type is doing its own marshalling
|
||||
marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
|
||||
if fieldType.Implements(marshalerType) {
|
||||
var pType = "string"
|
||||
if prop.Type == nil {
|
||||
prop.Type = &pType
|
||||
}
|
||||
if prop.Format == "" {
|
||||
prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType))
|
||||
}
|
||||
return jsonName, modelDescription, prop
|
||||
}
|
||||
|
||||
// check if annotation says it is a string
|
||||
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||
s := strings.Split(jsonTag, ",")
|
||||
if len(s) > 1 && s[1] == "string" {
|
||||
stringt := "string"
|
||||
prop.Type = &stringt
|
||||
return jsonName, modelDescription, prop
|
||||
}
|
||||
}
|
||||
|
||||
fieldKind := fieldType.Kind()
|
||||
switch {
|
||||
case fieldKind == reflect.Struct:
|
||||
jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
|
||||
return jsonName, modelDescription, prop
|
||||
case fieldKind == reflect.Slice || fieldKind == reflect.Array:
|
||||
jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
|
||||
return jsonName, modelDescription, prop
|
||||
case fieldKind == reflect.Ptr:
|
||||
jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
|
||||
return jsonName, modelDescription, prop
|
||||
case fieldKind == reflect.String:
|
||||
stringt := "string"
|
||||
prop.Type = &stringt
|
||||
return jsonName, modelDescription, prop
|
||||
case fieldKind == reflect.Map:
|
||||
// if it's a map, it's unstructured, and swagger 1.2 can't handle it
|
||||
objectType := "object"
|
||||
prop.Type = &objectType
|
||||
return jsonName, modelDescription, prop
|
||||
}
|
||||
|
||||
fieldTypeName := b.keyFrom(fieldType)
|
||||
if b.isPrimitiveType(fieldTypeName) {
|
||||
mapped := b.jsonSchemaType(fieldTypeName)
|
||||
prop.Type = &mapped
|
||||
prop.Format = b.jsonSchemaFormat(fieldTypeName)
|
||||
return jsonName, modelDescription, prop
|
||||
}
|
||||
modelType := b.keyFrom(fieldType)
|
||||
prop.Ref = &modelType
|
||||
|
||||
if fieldType.Name() == "" { // override type of anonymous structs
|
||||
nestedTypeName := modelName + "." + jsonName
|
||||
prop.Ref = &nestedTypeName
|
||||
b.addModel(fieldType, nestedTypeName)
|
||||
}
|
||||
return jsonName, modelDescription, prop
|
||||
}
|
||||
|
||||
func hasNamedJSONTag(field reflect.StructField) bool {
|
||||
parts := strings.Split(field.Tag.Get("json"), ",")
|
||||
if len(parts) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, s := range parts[1:] {
|
||||
if s == "inline" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return len(parts[0]) > 0
|
||||
}
|
||||
|
||||
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
|
||||
prop.setPropertyMetadata(field)
|
||||
// Check for type override in tag
|
||||
if prop.Type != nil {
|
||||
return jsonName, prop
|
||||
}
|
||||
fieldType := field.Type
|
||||
// check for anonymous
|
||||
if len(fieldType.Name()) == 0 {
|
||||
// anonymous
|
||||
anonType := model.Id + "." + jsonName
|
||||
b.addModel(fieldType, anonType)
|
||||
prop.Ref = &anonType
|
||||
return jsonName, prop
|
||||
}
|
||||
|
||||
if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
|
||||
// embedded struct
|
||||
sub := modelBuilder{new(ModelList), b.Config}
|
||||
sub.addModel(fieldType, "")
|
||||
subKey := sub.keyFrom(fieldType)
|
||||
// merge properties from sub
|
||||
subModel, _ := sub.Models.At(subKey)
|
||||
subModel.Properties.Do(func(k string, v ModelProperty) {
|
||||
model.Properties.Put(k, v)
|
||||
// if subModel says this property is required then include it
|
||||
required := false
|
||||
for _, each := range subModel.Required {
|
||||
if k == each {
|
||||
required = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if required {
|
||||
model.Required = append(model.Required, k)
|
||||
}
|
||||
})
|
||||
// add all new referenced models
|
||||
sub.Models.Do(func(key string, sub Model) {
|
||||
if key != subKey {
|
||||
if _, ok := b.Models.At(key); !ok {
|
||||
b.Models.Put(key, sub)
|
||||
}
|
||||
}
|
||||
})
|
||||
// empty name signals skip property
|
||||
return "", prop
|
||||
}
|
||||
// simple struct
|
||||
b.addModel(fieldType, "")
|
||||
var pType = b.keyFrom(fieldType)
|
||||
prop.Ref = &pType
|
||||
return jsonName, prop
|
||||
}
|
||||
|
||||
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
|
||||
// check for type override in tags
|
||||
prop.setPropertyMetadata(field)
|
||||
if prop.Type != nil {
|
||||
return jsonName, prop
|
||||
}
|
||||
fieldType := field.Type
|
||||
if fieldType.Elem().Kind() == reflect.Uint8 {
|
||||
stringt := "string"
|
||||
prop.Type = &stringt
|
||||
return jsonName, prop
|
||||
}
|
||||
var pType = "array"
|
||||
prop.Type = &pType
|
||||
isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
|
||||
elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
|
||||
prop.Items = new(Item)
|
||||
if isPrimitive {
|
||||
mapped := b.jsonSchemaType(elemTypeName)
|
||||
prop.Items.Type = &mapped
|
||||
} else {
|
||||
prop.Items.Ref = &elemTypeName
|
||||
}
|
||||
// add|overwrite model for element type
|
||||
if fieldType.Elem().Kind() == reflect.Ptr {
|
||||
fieldType = fieldType.Elem()
|
||||
}
|
||||
if !isPrimitive {
|
||||
b.addModel(fieldType.Elem(), elemTypeName)
|
||||
}
|
||||
return jsonName, prop
|
||||
}
|
||||
|
||||
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
|
||||
prop.setPropertyMetadata(field)
|
||||
// Check for type override in tags
|
||||
if prop.Type != nil {
|
||||
return jsonName, prop
|
||||
}
|
||||
fieldType := field.Type
|
||||
|
||||
// override type of pointer to list-likes
|
||||
if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
|
||||
var pType = "array"
|
||||
prop.Type = &pType
|
||||
isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
|
||||
elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
|
||||
if isPrimitive {
|
||||
primName := b.jsonSchemaType(elemName)
|
||||
prop.Items = &Item{Ref: &primName}
|
||||
} else {
|
||||
prop.Items = &Item{Ref: &elemName}
|
||||
}
|
||||
if !isPrimitive {
|
||||
// add|overwrite model for element type
|
||||
b.addModel(fieldType.Elem().Elem(), elemName)
|
||||
}
|
||||
} else {
|
||||
// non-array, pointer type
|
||||
fieldTypeName := b.keyFrom(fieldType.Elem())
|
||||
var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path
|
||||
if b.isPrimitiveType(fieldTypeName) {
|
||||
prop.Type = &pType
|
||||
prop.Format = b.jsonSchemaFormat(fieldTypeName)
|
||||
return jsonName, prop
|
||||
}
|
||||
prop.Ref = &pType
|
||||
elemName := ""
|
||||
if fieldType.Elem().Name() == "" {
|
||||
elemName = modelName + "." + jsonName
|
||||
prop.Ref = &elemName
|
||||
}
|
||||
b.addModel(fieldType.Elem(), elemName)
|
||||
}
|
||||
return jsonName, prop
|
||||
}
|
||||
|
||||
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Name() == "" {
|
||||
return modelName + "." + jsonName
|
||||
}
|
||||
return b.keyFrom(t)
|
||||
}
|
||||
|
||||
func (b modelBuilder) keyFrom(st reflect.Type) string {
|
||||
key := st.String()
|
||||
if b.Config != nil && b.Config.ModelTypeNameHandler != nil {
|
||||
if name, ok := b.Config.ModelTypeNameHandler(st); ok {
|
||||
key = name
|
||||
}
|
||||
}
|
||||
if len(st.Name()) == 0 { // unnamed type
|
||||
// Swagger UI has special meaning for [
|
||||
key = strings.Replace(key, "[]", "||", -1)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// see also https://golang.org/ref/spec#Numeric_types
|
||||
func (b modelBuilder) isPrimitiveType(modelName string) bool {
|
||||
if len(modelName) == 0 {
|
||||
return false
|
||||
}
|
||||
return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
|
||||
}
|
||||
|
||||
// jsonNameOfField returns the name of the field as it should appear in JSON format
|
||||
// An empty string indicates that this field is not part of the JSON representation
|
||||
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
|
||||
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
|
||||
s := strings.Split(jsonTag, ",")
|
||||
if s[0] == "-" {
|
||||
// empty name signals skip property
|
||||
return ""
|
||||
} else if s[0] != "" {
|
||||
return s[0]
|
||||
}
|
||||
}
|
||||
return field.Name
|
||||
}
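
A minimal sketch (not part of this change) of the naming rules implemented by `jsonNameOfField` above: a tag of `-` skips the field, an explicit tag name wins, and an empty tag falls back to the Go field name. The `Example` struct is hypothetical.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// jsonName mirrors the logic of jsonNameOfField above.
func jsonName(f reflect.StructField) string {
	if jsonTag := f.Tag.Get("json"); jsonTag != "" {
		s := strings.Split(jsonTag, ",")
		if s[0] == "-" {
			return "" // empty name signals skip property
		} else if s[0] != "" {
			return s[0]
		}
	}
	return f.Name
}

func main() {
	// Hypothetical struct, for illustration only.
	type Example struct {
		ID      int    `json:"id"`
		Secret  string `json:"-"`
		Comment string `json:",omitempty"`
		Plain   string
	}
	t := reflect.TypeOf(Example{})
	for i := 0; i < t.NumField(); i++ {
		fmt.Printf("%-7s -> %q\n", t.Field(i).Name, jsonName(t.Field(i)))
	}
}
```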
|
||||
|
||||
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
|
||||
func (b modelBuilder) jsonSchemaType(modelName string) string {
|
||||
schemaMap := map[string]string{
|
||||
"uint": "integer",
|
||||
"uint8": "integer",
|
||||
"uint16": "integer",
|
||||
"uint32": "integer",
|
||||
"uint64": "integer",
|
||||
|
||||
"int": "integer",
|
||||
"int8": "integer",
|
||||
"int16": "integer",
|
||||
"int32": "integer",
|
||||
"int64": "integer",
|
||||
|
||||
"byte": "integer",
|
||||
"float64": "number",
|
||||
"float32": "number",
|
||||
"bool": "boolean",
|
||||
"time.Time": "string",
|
||||
}
|
||||
mapped, ok := schemaMap[modelName]
|
||||
if !ok {
|
||||
return modelName // use as is (custom or struct)
|
||||
}
|
||||
return mapped
|
||||
}
|
||||
|
||||
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
|
||||
if b.Config != nil && b.Config.SchemaFormatHandler != nil {
|
||||
if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
|
||||
return mapped
|
||||
}
|
||||
}
|
||||
schemaMap := map[string]string{
|
||||
"int": "int32",
|
||||
"int32": "int32",
|
||||
"int64": "int64",
|
||||
"byte": "byte",
|
||||
"uint": "integer",
|
||||
"uint8": "byte",
|
||||
"float64": "double",
|
||||
"float32": "float",
|
||||
"time.Time": "date-time",
|
||||
"*time.Time": "date-time",
|
||||
}
|
||||
mapped, ok := schemaMap[modelName]
|
||||
if !ok {
|
||||
return "" // no format
|
||||
}
|
||||
return mapped
|
||||
}
|
86
vendor/github.com/emicklei/go-restful-swagger12/model_list.go
generated
vendored
Normal file

@ -0,0 +1,86 @@
|
||||
package swagger
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// NamedModel associates a name with a Model (not using its Id)
|
||||
type NamedModel struct {
|
||||
Name string
|
||||
Model Model
|
||||
}
|
||||
|
||||
// ModelList encapsulates a list of NamedModel (association)
|
||||
type ModelList struct {
|
||||
List []NamedModel
|
||||
}
|
||||
|
||||
// Put adds or replaces a Model by its name
|
||||
func (l *ModelList) Put(name string, model Model) {
|
||||
for i, each := range l.List {
|
||||
if each.Name == name {
|
||||
// replace
|
||||
l.List[i] = NamedModel{name, model}
|
||||
return
|
||||
}
|
||||
}
|
||||
// add
|
||||
l.List = append(l.List, NamedModel{name, model})
|
||||
}
|
||||
|
||||
// At returns a Model by its name, ok is false if absent
|
||||
func (l *ModelList) At(name string) (m Model, ok bool) {
|
||||
for _, each := range l.List {
|
||||
if each.Name == name {
|
||||
return each.Model, true
|
||||
}
|
||||
}
|
||||
return m, false
|
||||
}
|
||||
|
||||
// Do enumerates all the models, each with its assigned name
|
||||
func (l *ModelList) Do(block func(name string, value Model)) {
|
||||
for _, each := range l.List {
|
||||
block(each.Name, each.Model)
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON writes the ModelList as if it was a map[string]Model
|
||||
func (l ModelList) MarshalJSON() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
encoder := json.NewEncoder(&buf)
|
||||
buf.WriteString("{\n")
|
||||
for i, each := range l.List {
|
||||
buf.WriteString("\"")
|
||||
buf.WriteString(each.Name)
|
||||
buf.WriteString("\": ")
|
||||
encoder.Encode(each.Model)
|
||||
if i < len(l.List)-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
buf.WriteString("}")
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
|
||||
func (l *ModelList) UnmarshalJSON(data []byte) error {
|
||||
raw := map[string]interface{}{}
|
||||
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
|
||||
for k, v := range raw {
|
||||
// produces JSON bytes for each value
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var m Model
|
||||
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
|
||||
l.Put(k, m)
|
||||
}
|
||||
return nil
|
||||
}
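
A short usage sketch for `ModelList` (the model name is made up): `Put` replaces an entry by name rather than appending a duplicate, `At` looks it up again, and the custom `MarshalJSON` renders the list as a JSON object keyed by model name.

```go
package main

import (
	"encoding/json"
	"fmt"

	swagger "github.com/emicklei/go-restful-swagger12"
)

func main() {
	list := swagger.ModelList{}
	list.Put("demo.Widget", swagger.Model{Id: "demo.Widget"})
	// A second Put with the same name replaces the entry instead of adding one.
	list.Put("demo.Widget", swagger.Model{Id: "demo.Widget", Description: "replaced"})

	if m, ok := list.At("demo.Widget"); ok {
		fmt.Println(m.Description) // replaced
	}

	out, err := json.Marshal(list) // uses ModelList.MarshalJSON
	if err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(string(out)) // a JSON object keyed by "demo.Widget"
}
```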
|
81
vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
package swagger
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (prop *ModelProperty) setDescription(field reflect.StructField) {
|
||||
if tag := field.Tag.Get("description"); tag != "" {
|
||||
prop.Description = tag
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
|
||||
if tag := field.Tag.Get("default"); tag != "" {
|
||||
prop.DefaultValue = Special(tag)
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
|
||||
// We use | to separate the enum values. This value is chosen
|
||||
// since it's unlikely to be useful in actual enumeration values.
|
||||
if tag := field.Tag.Get("enum"); tag != "" {
|
||||
prop.Enum = strings.Split(tag, "|")
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setMaximum(field reflect.StructField) {
|
||||
if tag := field.Tag.Get("maximum"); tag != "" {
|
||||
prop.Maximum = tag
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setType(field reflect.StructField) {
|
||||
if tag := field.Tag.Get("type"); tag != "" {
|
||||
// Check if the first two characters of the type tag are
|
||||
// intended to emulate slice/array behaviour.
|
||||
//
|
||||
// If type is intended to be a slice/array then add the
|
||||
// overridden type to the array item instead of the main property
|
||||
if len(tag) > 2 && tag[0:2] == "[]" {
|
||||
pType := "array"
|
||||
prop.Type = &pType
|
||||
prop.Items = new(Item)
|
||||
|
||||
iType := tag[2:]
|
||||
prop.Items.Type = &iType
|
||||
return
|
||||
}
|
||||
|
||||
prop.Type = &tag
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setMinimum(field reflect.StructField) {
|
||||
if tag := field.Tag.Get("minimum"); tag != "" {
|
||||
prop.Minimum = tag
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
|
||||
tag := field.Tag.Get("unique")
|
||||
switch tag {
|
||||
case "true":
|
||||
v := true
|
||||
prop.UniqueItems = &v
|
||||
case "false":
|
||||
v := false
|
||||
prop.UniqueItems = &v
|
||||
}
|
||||
}
|
||||
|
||||
func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
|
||||
prop.setDescription(field)
|
||||
prop.setEnumValues(field)
|
||||
prop.setMinimum(field)
|
||||
prop.setMaximum(field)
|
||||
prop.setUniqueItems(field)
|
||||
prop.setDefaultValue(field)
|
||||
prop.setType(field)
|
||||
}
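
The setters above are driven entirely by struct tags on the sample types exposed by routes. A hypothetical resource showing which tag feeds which `ModelProperty` field:

```go
package sample

// Server is a hypothetical type; the tag names below are the ones read by
// setPropertyMetadata and its helpers.
type Server struct {
	Name   string   `json:"name" description:"human readable server name"` // -> Description
	Status string   `json:"status" enum:"ACTIVE|BUILD|ERROR"`              // -> Enum, split on "|"
	CPU    int      `json:"cpu" minimum:"1" maximum:"64"`                  // -> Minimum / Maximum
	Tags   []string `json:"tags" type:"[]string" unique:"true"`            // -> type "array", Items.Type "string", UniqueItems
	Zone   string   `json:"zone" default:"nova"`                           // -> DefaultValue
}
```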
|
87
vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
package swagger
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// NamedModelProperty associates a name to a ModelProperty
|
||||
type NamedModelProperty struct {
|
||||
Name string
|
||||
Property ModelProperty
|
||||
}
|
||||
|
||||
// ModelPropertyList encapsulates a list of NamedModelProperty (association)
|
||||
type ModelPropertyList struct {
|
||||
List []NamedModelProperty
|
||||
}
|
||||
|
||||
// At returns the ModelProperty by its name, ok is false if absent
|
||||
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
|
||||
for _, each := range l.List {
|
||||
if each.Name == name {
|
||||
return each.Property, true
|
||||
}
|
||||
}
|
||||
return p, false
|
||||
}
|
||||
|
||||
// Put adds or replaces a ModelProperty with this name
|
||||
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
|
||||
// maybe replace existing
|
||||
for i, each := range l.List {
|
||||
if each.Name == name {
|
||||
// replace
|
||||
l.List[i] = NamedModelProperty{Name: name, Property: prop}
|
||||
return
|
||||
}
|
||||
}
|
||||
// add
|
||||
l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
|
||||
}
|
||||
|
||||
// Do enumerates all the properties, each with its assigned name
|
||||
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
|
||||
for _, each := range l.List {
|
||||
block(each.Name, each.Property)
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
|
||||
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
encoder := json.NewEncoder(&buf)
|
||||
buf.WriteString("{\n")
|
||||
for i, each := range l.List {
|
||||
buf.WriteString("\"")
|
||||
buf.WriteString(each.Name)
|
||||
buf.WriteString("\": ")
|
||||
encoder.Encode(each.Property)
|
||||
if i < len(l.List)-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
buf.WriteString("}")
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
|
||||
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
|
||||
raw := map[string]interface{}{}
|
||||
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
|
||||
for k, v := range raw {
|
||||
// produces JSON bytes for each value
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var m ModelProperty
|
||||
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
|
||||
l.Put(k, m)
|
||||
}
|
||||
return nil
|
||||
}
|
36
vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
package swagger
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import "github.com/emicklei/go-restful"
|
||||
|
||||
type orderedRouteMap struct {
|
||||
elements map[string][]restful.Route
|
||||
keys []string
|
||||
}
|
||||
|
||||
func newOrderedRouteMap() *orderedRouteMap {
|
||||
return &orderedRouteMap{
|
||||
elements: map[string][]restful.Route{},
|
||||
keys: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (o *orderedRouteMap) Add(key string, route restful.Route) {
|
||||
routes, ok := o.elements[key]
|
||||
if ok {
|
||||
routes = append(routes, route)
|
||||
o.elements[key] = routes
|
||||
return
|
||||
}
|
||||
o.elements[key] = []restful.Route{route}
|
||||
o.keys = append(o.keys, key)
|
||||
}
|
||||
|
||||
func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
|
||||
for _, k := range o.keys {
|
||||
block(k, o.elements[k])
|
||||
}
|
||||
}
|
185
vendor/github.com/emicklei/go-restful-swagger12/swagger.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
// Package swagger implements the structures of the Swagger
|
||||
// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
|
||||
package swagger
|
||||
|
||||
const swaggerVersion = "1.2"
|
||||
|
||||
// 4.3.3 Data Type Fields
|
||||
type DataTypeFields struct {
|
||||
Type *string `json:"type,omitempty"` // if Ref not used
|
||||
Ref *string `json:"$ref,omitempty"` // if Type not used
|
||||
Format string `json:"format,omitempty"`
|
||||
DefaultValue Special `json:"defaultValue,omitempty"`
|
||||
Enum []string `json:"enum,omitempty"`
|
||||
Minimum string `json:"minimum,omitempty"`
|
||||
Maximum string `json:"maximum,omitempty"`
|
||||
Items *Item `json:"items,omitempty"`
|
||||
UniqueItems *bool `json:"uniqueItems,omitempty"`
|
||||
}
|
||||
|
||||
type Special string
|
||||
|
||||
// 4.3.4 Items Object
|
||||
type Item struct {
|
||||
Type *string `json:"type,omitempty"`
|
||||
Ref *string `json:"$ref,omitempty"`
|
||||
Format string `json:"format,omitempty"`
|
||||
}
|
||||
|
||||
// 5.1 Resource Listing
|
||||
type ResourceListing struct {
|
||||
SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
|
||||
Apis []Resource `json:"apis"`
|
||||
ApiVersion string `json:"apiVersion"`
|
||||
Info Info `json:"info"`
|
||||
Authorizations []Authorization `json:"authorizations,omitempty"`
|
||||
}
|
||||
|
||||
// 5.1.2 Resource Object
|
||||
type Resource struct {
|
||||
Path string `json:"path"` // relative or absolute, must start with /
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// 5.1.3 Info Object
|
||||
type Info struct {
|
||||
Title string `json:"title"`
|
||||
Description string `json:"description"`
|
||||
TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
|
||||
Contact string `json:"contact,omitempty"`
|
||||
License string `json:"license,omitempty"`
|
||||
LicenseUrl string `json:"licenseUrl,omitempty"`
|
||||
}
|
||||
|
||||
// 5.1.5
|
||||
type Authorization struct {
|
||||
Type string `json:"type"`
|
||||
PassAs string `json:"passAs"`
|
||||
Keyname string `json:"keyname"`
|
||||
Scopes []Scope `json:"scopes"`
|
||||
GrantTypes []GrantType `json:"grandTypes"`
|
||||
}
|
||||
|
||||
// 5.1.6, 5.2.11
|
||||
type Scope struct {
|
||||
// Required. The name of the scope.
|
||||
Scope string `json:"scope"`
|
||||
// Recommended. A short description of the scope.
|
||||
Description string `json:"description"`
|
||||
}
|
||||
|
||||
// 5.1.7
|
||||
type GrantType struct {
|
||||
Implicit Implicit `json:"implicit"`
|
||||
AuthorizationCode AuthorizationCode `json:"authorization_code"`
|
||||
}
|
||||
|
||||
// 5.1.8 Implicit Object
|
||||
type Implicit struct {
|
||||
// Required. The login endpoint definition.
|
||||
loginEndpoint LoginEndpoint `json:"loginEndpoint"`
|
||||
// An optional alternative name to standard "access_token" OAuth2 parameter.
|
||||
TokenName string `json:"tokenName"`
|
||||
}
|
||||
|
||||
// 5.1.9 Authorization Code Object
|
||||
type AuthorizationCode struct {
|
||||
TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
|
||||
TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
|
||||
}
|
||||
|
||||
// 5.1.10 Login Endpoint Object
|
||||
type LoginEndpoint struct {
|
||||
// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
// 5.1.11 Token Request Endpoint Object
|
||||
type TokenRequestEndpoint struct {
|
||||
// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
|
||||
Url string `json:"url"`
|
||||
// An optional alternative name to standard "client_id" OAuth2 parameter.
|
||||
ClientIdName string `json:"clientIdName"`
|
||||
// An optional alternative name to the standard "client_secret" OAuth2 parameter.
|
||||
ClientSecretName string `json:"clientSecretName"`
|
||||
}
|
||||
|
||||
// 5.1.12 Token Endpoint Object
|
||||
type TokenEndpoint struct {
|
||||
// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
|
||||
Url string `json:"url"`
|
||||
// An optional alternative name to standard "access_token" OAuth2 parameter.
|
||||
TokenName string `json:"tokenName"`
|
||||
}
|
||||
|
||||
// 5.2 API Declaration
|
||||
type ApiDeclaration struct {
|
||||
SwaggerVersion string `json:"swaggerVersion"`
|
||||
ApiVersion string `json:"apiVersion"`
|
||||
BasePath string `json:"basePath"`
|
||||
ResourcePath string `json:"resourcePath"` // must start with /
|
||||
Info Info `json:"info"`
|
||||
Apis []Api `json:"apis,omitempty"`
|
||||
Models ModelList `json:"models,omitempty"`
|
||||
Produces []string `json:"produces,omitempty"`
|
||||
Consumes []string `json:"consumes,omitempty"`
|
||||
Authorizations []Authorization `json:"authorizations,omitempty"`
|
||||
}
|
||||
|
||||
// 5.2.2 API Object
|
||||
type Api struct {
|
||||
Path string `json:"path"` // relative or absolute, must start with /
|
||||
Description string `json:"description"`
|
||||
Operations []Operation `json:"operations,omitempty"`
|
||||
}
|
||||
|
||||
// 5.2.3 Operation Object
|
||||
type Operation struct {
|
||||
DataTypeFields
|
||||
Method string `json:"method"`
|
||||
Summary string `json:"summary,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Nickname string `json:"nickname"`
|
||||
Authorizations []Authorization `json:"authorizations,omitempty"`
|
||||
Parameters []Parameter `json:"parameters"`
|
||||
ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
|
||||
Produces []string `json:"produces,omitempty"`
|
||||
Consumes []string `json:"consumes,omitempty"`
|
||||
Deprecated string `json:"deprecated,omitempty"`
|
||||
}
|
||||
|
||||
// 5.2.4 Parameter Object
|
||||
type Parameter struct {
|
||||
DataTypeFields
|
||||
ParamType string `json:"paramType"` // path,query,body,header,form
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Required bool `json:"required"`
|
||||
AllowMultiple bool `json:"allowMultiple"`
|
||||
}
|
||||
|
||||
// 5.2.5 Response Message Object
|
||||
type ResponseMessage struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
ResponseModel string `json:"responseModel,omitempty"`
|
||||
}
|
||||
|
||||
// 5.2.6, 5.2.7 Models Object
|
||||
type Model struct {
|
||||
Id string `json:"id"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Required []string `json:"required,omitempty"`
|
||||
Properties ModelPropertyList `json:"properties"`
|
||||
SubTypes []string `json:"subTypes,omitempty"`
|
||||
Discriminator string `json:"discriminator,omitempty"`
|
||||
}
|
||||
|
||||
// 5.2.8 Properties Object
|
||||
type ModelProperty struct {
|
||||
DataTypeFields
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
// 5.2.10
|
||||
type Authorizations map[string]Authorization
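
For orientation, a small sketch that fills in a few of the structs above and prints the resulting Swagger 1.2 JSON; the title and path values are placeholders, not values used by this change.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	swagger "github.com/emicklei/go-restful-swagger12"
)

func main() {
	listing := swagger.ResourceListing{
		SwaggerVersion: "1.2",
		ApiVersion:     "v1",
		Info:           swagger.Info{Title: "example API", Description: "placeholder"},
		Apis: []swagger.Resource{
			{Path: "/auth", Description: "placeholder resource"},
		},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(listing); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```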
|
21
vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
package swagger
|
||||
|
||||
type SwaggerBuilder struct {
|
||||
SwaggerService
|
||||
}
|
||||
|
||||
func NewSwaggerBuilder(config Config) *SwaggerBuilder {
|
||||
return &SwaggerBuilder{*newSwaggerService(config)}
|
||||
}
|
||||
|
||||
func (sb SwaggerBuilder) ProduceListing() ResourceListing {
|
||||
return sb.SwaggerService.produceListing()
|
||||
}
|
||||
|
||||
func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
|
||||
return sb.SwaggerService.produceAllDeclarations()
|
||||
}
|
||||
|
||||
func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
|
||||
return sb.SwaggerService.produceDeclarations(route)
|
||||
}
|
443
vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go
generated
vendored
Normal file
@ -0,0 +1,443 @@
|
||||
package swagger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/emicklei/go-restful"
|
||||
// "github.com/emicklei/hopwatch"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
type SwaggerService struct {
|
||||
config Config
|
||||
apiDeclarationMap *ApiDeclarationList
|
||||
}
|
||||
|
||||
func newSwaggerService(config Config) *SwaggerService {
|
||||
sws := &SwaggerService{
|
||||
config: config,
|
||||
apiDeclarationMap: new(ApiDeclarationList)}
|
||||
|
||||
// Build all ApiDeclarations
|
||||
for _, each := range config.WebServices {
|
||||
rootPath := each.RootPath()
|
||||
// skip the api service itself
|
||||
if rootPath != config.ApiPath {
|
||||
if rootPath == "" || rootPath == "/" {
|
||||
// use routes
|
||||
for _, route := range each.Routes() {
|
||||
entry := staticPathFromRoute(route)
|
||||
_, exists := sws.apiDeclarationMap.At(entry)
|
||||
if !exists {
|
||||
sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
|
||||
}
|
||||
}
|
||||
} else { // use root path
|
||||
sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if specified then call the PostBuilderHandler
|
||||
if config.PostBuildHandler != nil {
|
||||
config.PostBuildHandler(sws.apiDeclarationMap)
|
||||
}
|
||||
return sws
|
||||
}
|
||||
|
||||
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
|
||||
var LogInfo = func(format string, v ...interface{}) {
|
||||
// use the restful package-wide logger
|
||||
log.Printf(format, v...)
|
||||
}
|
||||
|
||||
// InstallSwaggerService adds the WebService that provides the API documentation of all services
|
||||
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
|
||||
func InstallSwaggerService(aSwaggerConfig Config) {
|
||||
RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
|
||||
}
|
||||
|
||||
// RegisterSwaggerService adds the WebService that provides the API documentation of all services
|
||||
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
|
||||
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
|
||||
sws := newSwaggerService(config)
|
||||
ws := new(restful.WebService)
|
||||
ws.Path(config.ApiPath)
|
||||
ws.Produces(restful.MIME_JSON)
|
||||
if config.DisableCORS {
|
||||
ws.Filter(enableCORS)
|
||||
}
|
||||
ws.Route(ws.GET("/").To(sws.getListing))
|
||||
ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
|
||||
ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
|
||||
ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
|
||||
ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
|
||||
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
|
||||
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
|
||||
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
|
||||
LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
|
||||
wsContainer.Add(ws)
|
||||
|
||||
// Check paths for UI serving
|
||||
if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
|
||||
swaggerPathSlash := config.SwaggerPath
|
||||
// path must end with slash /
|
||||
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
|
||||
LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
|
||||
swaggerPathSlash += "/"
|
||||
}
|
||||
|
||||
LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
|
||||
wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
|
||||
|
||||
//if we define a custom static handler use it
|
||||
} else if config.StaticHandler != nil && config.SwaggerPath != "" {
|
||||
swaggerPathSlash := config.SwaggerPath
|
||||
// path must end with slash /
|
||||
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
|
||||
LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
|
||||
swaggerPathSlash += "/"
|
||||
|
||||
}
|
||||
LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
|
||||
wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
|
||||
|
||||
} else {
|
||||
LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
|
||||
}
|
||||
}
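
A hedged end-to-end sketch of how a service might wire this up; the route, port, and swagger-ui asset path below are assumptions for illustration, not values used by this change.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
	swagger "github.com/emicklei/go-restful-swagger12"
)

func main() {
	// Hypothetical web service with a single documented route.
	ws := new(restful.WebService)
	ws.Path("/apis/v1").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/tenants").To(listTenants).Doc("list tenants"))
	restful.Add(ws)

	config := swagger.Config{
		WebServices:     restful.DefaultContainer.RegisteredWebServices(),
		WebServicesUrl:  "http://localhost:8080",
		ApiPath:         "/apidocs.json",
		SwaggerPath:     "/apidocs/",
		SwaggerFilePath: "/usr/share/swagger-ui/dist", // assumed location of the swagger-ui assets
	}
	swagger.RegisterSwaggerService(config, restful.DefaultContainer)

	log.Fatal(http.ListenAndServe(":8080", restful.DefaultContainer))
}

func listTenants(req *restful.Request, resp *restful.Response) {
	resp.WriteAsJson([]string{}) // placeholder handler
}
```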
|
||||
|
||||
func staticPathFromRoute(r restful.Route) string {
|
||||
static := r.Path
|
||||
bracket := strings.Index(static, "{")
|
||||
if bracket <= 1 { // result cannot be empty
|
||||
return static
|
||||
}
|
||||
if bracket != -1 {
|
||||
static = r.Path[:bracket]
|
||||
}
|
||||
if strings.HasSuffix(static, "/") {
|
||||
return static[:len(static)-1]
|
||||
} else {
|
||||
return static
|
||||
}
|
||||
}
|
||||
|
||||
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
|
||||
if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
|
||||
// prevent duplicate header
|
||||
if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
|
||||
resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
|
||||
}
|
||||
}
|
||||
chain.ProcessFilter(req, resp)
|
||||
}
|
||||
|
||||
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
|
||||
listing := sws.produceListing()
|
||||
resp.WriteAsJson(listing)
|
||||
}
|
||||
|
||||
func (sws SwaggerService) produceListing() ResourceListing {
|
||||
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
|
||||
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
|
||||
ref := Resource{Path: k}
|
||||
if len(v.Apis) > 0 { // use description of first (could still be empty)
|
||||
ref.Description = v.Apis[0].Description
|
||||
}
|
||||
listing.Apis = append(listing.Apis, ref)
|
||||
})
|
||||
return listing
|
||||
}
|
||||
|
||||
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
|
||||
decl, ok := sws.produceDeclarations(composeRootPath(req))
|
||||
if !ok {
|
||||
resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
|
||||
return
|
||||
}
|
||||
// unless WebServicesUrl is given
|
||||
if len(sws.config.WebServicesUrl) == 0 {
|
||||
// update base path from the actual request
|
||||
// TODO how to detect https? assume http for now
|
||||
var host string
|
||||
// X-Forwarded-Host or Host or Request.Host
|
||||
hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
|
||||
if !ok || len(hostvalues) == 0 {
|
||||
forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
|
||||
if !ok || len(forwarded) == 0 {
|
||||
// fallback to Host field
|
||||
host = req.Request.Host
|
||||
} else {
|
||||
host = forwarded[0]
|
||||
}
|
||||
} else {
|
||||
host = hostvalues[0]
|
||||
}
|
||||
// inspect Referer for the scheme (http vs https)
|
||||
scheme := "http"
|
||||
if referer := req.Request.Header["Referer"]; len(referer) > 0 {
|
||||
if strings.HasPrefix(referer[0], "https") {
|
||||
scheme = "https"
|
||||
}
|
||||
}
|
||||
decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
|
||||
}
|
||||
resp.WriteAsJson(decl)
|
||||
}
|
||||
|
||||
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
|
||||
decls := map[string]ApiDeclaration{}
|
||||
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
|
||||
decls[k] = v
|
||||
})
|
||||
return decls
|
||||
}
|
||||
|
||||
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
|
||||
decl, ok := sws.apiDeclarationMap.At(route)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
decl.BasePath = sws.config.WebServicesUrl
|
||||
return &decl, true
|
||||
}
|
||||
|
||||
// composeDeclaration uses all routes and parameters to create a ApiDeclaration
|
||||
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
|
||||
decl := ApiDeclaration{
|
||||
SwaggerVersion: swaggerVersion,
|
||||
BasePath: sws.config.WebServicesUrl,
|
||||
ResourcePath: pathPrefix,
|
||||
Models: ModelList{},
|
||||
ApiVersion: ws.Version()}
|
||||
|
||||
// collect any path parameters
|
||||
rootParams := []Parameter{}
|
||||
for _, param := range ws.PathParameters() {
|
||||
rootParams = append(rootParams, asSwaggerParameter(param.Data()))
|
||||
}
|
||||
// aggregate by path
|
||||
pathToRoutes := newOrderedRouteMap()
|
||||
for _, other := range ws.Routes() {
|
||||
if strings.HasPrefix(other.Path, pathPrefix) {
|
||||
if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
|
||||
continue
|
||||
}
|
||||
pathToRoutes.Add(other.Path, other)
|
||||
}
|
||||
}
|
||||
pathToRoutes.Do(func(path string, routes []restful.Route) {
|
||||
api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
|
||||
voidString := "void"
|
||||
for _, route := range routes {
|
||||
operation := Operation{
|
||||
Method: route.Method,
|
||||
Summary: route.Doc,
|
||||
Notes: route.Notes,
|
||||
// Type gets overwritten if there is a write sample
|
||||
DataTypeFields: DataTypeFields{Type: &voidString},
|
||||
Parameters: []Parameter{},
|
||||
Nickname: route.Operation,
|
||||
ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
|
||||
|
||||
operation.Consumes = route.Consumes
|
||||
operation.Produces = route.Produces
|
||||
|
||||
// share root params if any
|
||||
for _, swparam := range rootParams {
|
||||
operation.Parameters = append(operation.Parameters, swparam)
|
||||
}
|
||||
// route specific params
|
||||
for _, param := range route.ParameterDocs {
|
||||
operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
|
||||
}
|
||||
|
||||
sws.addModelsFromRouteTo(&operation, route, &decl)
|
||||
api.Operations = append(api.Operations, operation)
|
||||
}
|
||||
decl.Apis = append(decl.Apis, api)
|
||||
})
|
||||
return decl
|
||||
}
|
||||
|
||||
func withoutWildcard(path string) string {
|
||||
if strings.HasSuffix(path, ":*}") {
|
||||
return path[0:len(path)-3] + "}"
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
|
||||
func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
|
||||
if route.ResponseErrors == nil {
|
||||
return messages
|
||||
}
|
||||
// sort by code
|
||||
codes := sort.IntSlice{}
|
||||
for code := range route.ResponseErrors {
|
||||
codes = append(codes, code)
|
||||
}
|
||||
codes.Sort()
|
||||
for _, code := range codes {
|
||||
each := route.ResponseErrors[code]
|
||||
message := ResponseMessage{
|
||||
Code: code,
|
||||
Message: each.Message,
|
||||
}
|
||||
if each.Model != nil {
|
||||
st := reflect.TypeOf(each.Model)
|
||||
isCollection, st := detectCollectionType(st)
|
||||
// collection cannot be in responsemodel
|
||||
if !isCollection {
|
||||
modelName := modelBuilder{}.keyFrom(st)
|
||||
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
|
||||
message.ResponseModel = modelName
|
||||
}
|
||||
}
|
||||
messages = append(messages, message)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
|
||||
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
|
||||
if route.ReadSample != nil {
|
||||
sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
|
||||
}
|
||||
if route.WriteSample != nil {
|
||||
sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
|
||||
}
|
||||
}
|
||||
|
||||
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
|
||||
isCollection := false
|
||||
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
|
||||
st = st.Elem()
|
||||
isCollection = true
|
||||
} else {
|
||||
if st.Kind() == reflect.Ptr {
|
||||
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
|
||||
st = st.Elem().Elem()
|
||||
isCollection = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return isCollection, st
|
||||
}
|
||||
|
||||
// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
|
||||
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
|
||||
mb := modelBuilder{Models: models, Config: &sws.config}
|
||||
if isResponse {
|
||||
sampleType, items := asDataType(sample, &sws.config)
|
||||
operation.Type = sampleType
|
||||
operation.Items = items
|
||||
}
|
||||
mb.addModelFrom(sample)
|
||||
}
|
||||
|
||||
func asSwaggerParameter(param restful.ParameterData) Parameter {
|
||||
return Parameter{
|
||||
DataTypeFields: DataTypeFields{
|
||||
Type: &param.DataType,
|
||||
Format: asFormat(param.DataType, param.DataFormat),
|
||||
DefaultValue: Special(param.DefaultValue),
|
||||
},
|
||||
Name: param.Name,
|
||||
Description: param.Description,
|
||||
ParamType: asParamType(param.Kind),
|
||||
|
||||
Required: param.Required}
|
||||
}
|
||||
|
||||
// Between 1 and 7 path parameters are supported
|
||||
func composeRootPath(req *restful.Request) string {
|
||||
path := "/" + req.PathParameter("a")
|
||||
b := req.PathParameter("b")
|
||||
if b == "" {
|
||||
return path
|
||||
}
|
||||
path = path + "/" + b
|
||||
c := req.PathParameter("c")
|
||||
if c == "" {
|
||||
return path
|
||||
}
|
||||
path = path + "/" + c
|
||||
d := req.PathParameter("d")
|
||||
if d == "" {
|
||||
return path
|
||||
}
|
||||
path = path + "/" + d
|
||||
e := req.PathParameter("e")
|
||||
if e == "" {
|
||||
return path
|
||||
}
|
||||
path = path + "/" + e
|
||||
f := req.PathParameter("f")
|
||||
if f == "" {
|
||||
return path
|
||||
}
|
||||
path = path + "/" + f
|
||||
g := req.PathParameter("g")
|
||||
if g == "" {
|
||||
return path
|
||||
}
|
||||
return path + "/" + g
|
||||
}
|
||||
|
||||
func asFormat(dataType string, dataFormat string) string {
|
||||
if dataFormat != "" {
|
||||
return dataFormat
|
||||
}
|
||||
return "" // TODO
|
||||
}
|
||||
|
||||
func asParamType(kind int) string {
|
||||
switch {
|
||||
case kind == restful.PathParameterKind:
|
||||
return "path"
|
||||
case kind == restful.QueryParameterKind:
|
||||
return "query"
|
||||
case kind == restful.BodyParameterKind:
|
||||
return "body"
|
||||
case kind == restful.HeaderParameterKind:
|
||||
return "header"
|
||||
case kind == restful.FormParameterKind:
|
||||
return "form"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func asDataType(any interface{}, config *Config) (*string, *Item) {
|
||||
// If it's not a collection, return the suggested model name
|
||||
st := reflect.TypeOf(any)
|
||||
isCollection, st := detectCollectionType(st)
|
||||
modelName := modelBuilder{}.keyFrom(st)
|
||||
// if it's not a collection we are done
|
||||
if !isCollection {
|
||||
return &modelName, nil
|
||||
}
|
||||
|
||||
// XXX: This is not very elegant
|
||||
// We create an Item object referring to the given model
|
||||
models := ModelList{}
|
||||
mb := modelBuilder{Models: &models, Config: config}
|
||||
mb.addModelFrom(any)
|
||||
|
||||
elemTypeName := mb.getElementTypeName(modelName, "", st)
|
||||
item := new(Item)
|
||||
if mb.isPrimitiveType(elemTypeName) {
|
||||
mapped := mb.jsonSchemaType(elemTypeName)
|
||||
item.Type = &mapped
|
||||
} else {
|
||||
item.Ref = &elemTypeName
|
||||
}
|
||||
tmp := "array"
|
||||
return &tmp, item
|
||||
}
|
20
vendor/github.com/ghodss/yaml/.gitignore
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
# OSX leaves these everywhere on SMB shares
|
||||
._*
|
||||
|
||||
# Eclipse files
|
||||
.classpath
|
||||
.project
|
||||
.settings/**
|
||||
|
||||
# Emacs save files
|
||||
*~
|
||||
|
||||
# Vim-related files
|
||||
[._]*.s[a-w][a-z]
|
||||
[._]s[a-w][a-z]
|
||||
*.un~
|
||||
Session.vim
|
||||
.netrwhist
|
||||
|
||||
# Go test binaries
|
||||
*.test
|
7
vendor/github.com/ghodss/yaml/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
script:
|
||||
- go test
|
||||
- go build
|
50
vendor/github.com/ghodss/yaml/LICENSE
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Sam Ghods
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
121
vendor/github.com/ghodss/yaml/README.md
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
# YAML marshaling and unmarshaling support for Go
|
||||
|
||||
[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
|
||||
|
||||
## Introduction
|
||||
|
||||
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
|
||||
|
||||
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
|
||||
|
||||
## Compatibility
|
||||
|
||||
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
|
||||
|
||||
## Caveats
|
||||
|
||||
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
|
||||
|
||||
```
|
||||
BAD:
|
||||
exampleKey: !!binary gIGC
|
||||
|
||||
GOOD:
|
||||
exampleKey: gIGC
|
||||
... and decode the base64 data in your code.
|
||||
```
|
||||
|
||||
**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support such keys. The same error occurs in `Unmarshal`, since you can't unmarshal map keys anyway: struct fields can't be keys.
|
||||
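
As a concrete (hypothetical) illustration of Caveat #2; the exact error text may differ:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// A YAML mapping whose key is itself a mapping: valid YAML, but it has
	// no JSON equivalent, so YAMLToJSON is expected to return an error.
	y := []byte("? {name: John}\n: admin\n")
	if _, err := yaml.YAMLToJSON(y); err != nil {
		fmt.Println("err:", err)
	}
}
```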
|
||||
## Installation and usage
|
||||
|
||||
To install, run:
|
||||
|
||||
```
|
||||
$ go get github.com/ghodss/yaml
|
||||
```
|
||||
|
||||
And import using:
|
||||
|
||||
```
|
||||
import "github.com/ghodss/yaml"
|
||||
```
|
||||
|
||||
Usage is very similar to the JSON library:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
|
||||
type Person struct {
|
||||
Name string `json:"name"` // Affects YAML field names too.
|
||||
Age int `json:"age"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Marshal a Person struct to YAML.
|
||||
p := Person{"John", 30}
|
||||
y, err := yaml.Marshal(p)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(y))
|
||||
/* Output:
|
||||
age: 30
|
||||
name: John
|
||||
*/
|
||||
|
||||
// Unmarshal the YAML back into a Person struct.
|
||||
var p2 Person
|
||||
err = yaml.Unmarshal(y, &p2)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(p2)
|
||||
/* Output:
|
||||
{John 30}
|
||||
*/
|
||||
}
|
||||
```
|
||||
|
||||
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
|
||||
func main() {
|
||||
j := []byte(`{"name": "John", "age": 30}`)
|
||||
y, err := yaml.JSONToYAML(j)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(y))
|
||||
/* Output:
|
||||
name: John
|
||||
age: 30
|
||||
*/
|
||||
j2, err := yaml.YAMLToJSON(y)
|
||||
if err != nil {
|
||||
fmt.Printf("err: %v\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Println(string(j2))
|
||||
/* Output:
|
||||
{"age":30,"name":"John"}
|
||||
*/
|
||||
}
|
||||
```
|
501
vendor/github.com/ghodss/yaml/fields.go
generated
vendored
Normal file
@ -0,0 +1,501 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// indirect walks down v allocating pointers as needed,
|
||||
// until it gets to a non-pointer.
|
||||
// if it encounters an Unmarshaler, indirect stops and returns that.
|
||||
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
|
||||
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
|
||||
// If v is a named type and is addressable,
|
||||
// start with its address, so that if the type has pointer methods,
|
||||
// we find them.
|
||||
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
for {
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
e := v.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
|
||||
v = e
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
|
||||
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
|
||||
break
|
||||
}
|
||||
if v.IsNil() {
|
||||
if v.CanSet() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
} else {
|
||||
v = reflect.New(v.Type().Elem())
|
||||
}
|
||||
}
|
||||
if v.Type().NumMethod() > 0 {
|
||||
if u, ok := v.Interface().(json.Unmarshaler); ok {
|
||||
return u, nil, reflect.Value{}
|
||||
}
|
||||
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||
return nil, u, reflect.Value{}
|
||||
}
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
return nil, nil, v
|
||||
}
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string
|
||||
nameBytes []byte // []byte(name)
|
||||
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
|
||||
|
||||
tag bool
|
||||
index []int
|
||||
typ reflect.Type
|
||||
omitEmpty bool
|
||||
quoted bool
|
||||
}
|
||||
|
||||
func fillField(f field) field {
|
||||
f.nameBytes = []byte(f.name)
|
||||
f.equalFold = foldFunc(f.nameBytes)
|
||||
return f
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from json tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that JSON should recognize for the given type.
|
||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||
// and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" { // unexported
|
||||
continue
|
||||
}
|
||||
tag := sf.Tag.Get("json")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
name, opts := parseTag(tag)
|
||||
if !isValidTag(name) {
|
||||
name = ""
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := name != ""
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, fillField(field{
|
||||
name: name,
|
||||
tag: tagged,
|
||||
index: index,
|
||||
typ: ft,
|
||||
omitEmpty: opts.Contains("omitempty"),
|
||||
quoted: opts.Contains("string"),
|
||||
}))
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with JSON tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// JSON tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
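// Illustrative sketch (not part of the upstream source): the rules in
// typeFields and dominantField above mirror encoding/json's handling of
// embedded structs. Assuming these hypothetical types:
//
//	type Inner struct{ Name string }
//	type Outer struct {
//		Inner
//		Name string
//	}
//
// both fields are collected under the name "Name", but Outer.Name has the
// shorter index sequence ([1] vs. [0 0]), so dominantField keeps it and the
// promoted Inner.Name is hidden. If two same-named fields sit at equal depth,
// a json tag on exactly one of them breaks the tie; with no tag, or a tag on
// both, the name conflicts and is dropped entirely.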
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
||||
|
||||
func isValidTag(s string) bool {
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range s {
|
||||
switch {
|
||||
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
|
||||
// Backslash and quote chars are reserved, but
|
||||
// otherwise any punctuation chars are allowed
|
||||
// in a tag name.
|
||||
default:
|
||||
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||
kelvin = '\u212a'
|
||||
smallLongEss = '\u017f'
|
||||
)
|
||||
|
||||
// foldFunc returns one of four different case folding equivalence
|
||||
// functions, from most general (and slow) to fastest:
|
||||
//
|
||||
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||
//
|
||||
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||
// * S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||
// * k maps to K and to U+212A 'K' Kelvin sign
|
||||
// See http://play.golang.org/p/tTxjOc0OGo
|
||||
//
|
||||
// The returned function is specialized for matching against s and
|
||||
// should only be given s. It's not curried for performance reasons.
|
||||
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||
nonLetter := false
|
||||
special := false // special letter
|
||||
for _, b := range s {
|
||||
if b >= utf8.RuneSelf {
|
||||
return bytes.EqualFold
|
||||
}
|
||||
upper := b & caseMask
|
||||
if upper < 'A' || upper > 'Z' {
|
||||
nonLetter = true
|
||||
} else if upper == 'K' || upper == 'S' {
|
||||
// See above for why these letters are special.
|
||||
special = true
|
||||
}
|
||||
}
|
||||
if special {
|
||||
return equalFoldRight
|
||||
}
|
||||
if nonLetter {
|
||||
return asciiEqualFold
|
||||
}
|
||||
return simpleLetterEqualFold
|
||||
}
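// Illustrative sketch (not part of the upstream source) of which comparator
// foldFunc selects; the byte slices below are arbitrary example keys.
//
//	kf := foldFunc([]byte("Kind"))      // contains 'K' -> equalFoldRight
//	kf([]byte("Kind"), []byte("kind"))  // true: plain ASCII case fold
//
//	kk := foldFunc([]byte("K"))
//	kk([]byte("K"), []byte("\u212a"))   // true: 'K' also matches U+212A KELVIN SIGN
//
//	foldFunc([]byte("max_age"))         // has '_' but no k/s -> asciiEqualFold
//	foldFunc([]byte("name"))            // ASCII letters only  -> simpleLetterEqualFold
//	foldFunc([]byte("héllo"))           // non-ASCII byte      -> bytes.EqualFold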
|
||||
|
||||
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||
// known to be all ASCII (including punctuation), but contains an 's',
|
||||
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||
// See comments on foldFunc.
|
||||
func equalFoldRight(s, t []byte) bool {
|
||||
for _, sb := range s {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
tb := t[0]
|
||||
if tb < utf8.RuneSelf {
|
||||
if sb != tb {
|
||||
sbUpper := sb & caseMask
|
||||
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||
if sbUpper != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t = t[1:]
|
||||
continue
|
||||
}
|
||||
// sb is ASCII and t is not. t must be either kelvin
|
||||
// sign or long s; sb must be s, S, k, or K.
|
||||
tr, size := utf8.DecodeRune(t)
|
||||
switch sb {
|
||||
case 's', 'S':
|
||||
if tr != smallLongEss {
|
||||
return false
|
||||
}
|
||||
case 'k', 'K':
|
||||
if tr != kelvin {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
t = t[size:]
|
||||
|
||||
}
|
||||
if len(t) > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||
// s is all ASCII (but may contain non-letters) and contains no
|
||||
// special-folding letters.
|
||||
// See comments on foldFunc.
|
||||
func asciiEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, sb := range s {
|
||||
tb := t[i]
|
||||
if sb == tb {
|
||||
continue
|
||||
}
|
||||
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||
if sb&caseMask != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||
// use when s is all ASCII letters (no underscores, etc) and also
|
||||
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||
// See comments on foldFunc.
|
||||
func simpleLetterEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, b := range s {
|
||||
if b&caseMask != t[i]&caseMask {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// tagOptions is the string following a comma in a struct field's "json"
|
||||
// tag, or the empty string. It does not include the leading comma.
|
||||
type tagOptions string
|
||||
|
||||
// parseTag splits a struct field's json tag into its name and
|
||||
// comma-separated options.
|
||||
func parseTag(tag string) (string, tagOptions) {
|
||||
if idx := strings.Index(tag, ","); idx != -1 {
|
||||
return tag[:idx], tagOptions(tag[idx+1:])
|
||||
}
|
||||
return tag, tagOptions("")
|
||||
}
|
||||
|
||||
// Contains reports whether a comma-separated list of options
|
||||
// contains a particular substr flag. substr must be surrounded by a
|
||||
// string boundary or commas.
|
||||
func (o tagOptions) Contains(optionName string) bool {
|
||||
if len(o) == 0 {
|
||||
return false
|
||||
}
|
||||
s := string(o)
|
||||
for s != "" {
|
||||
var next string
|
||||
i := strings.Index(s, ",")
|
||||
if i >= 0 {
|
||||
s, next = s[:i], s[i+1:]
|
||||
}
|
||||
if s == optionName {
|
||||
return true
|
||||
}
|
||||
s = next
|
||||
}
|
||||
return false
|
||||
}
|
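// Illustrative sketch (not part of the upstream source) of the tag helpers
// above; the tag strings are arbitrary examples.
//
//	name, opts := parseTag("spec,omitempty,string")
//	// name == "spec"
//	// opts.Contains("omitempty") == true
//	// opts.Contains("string")    == true
//	// opts.Contains("omit")      == false (whole options only, not substrings)
//
//	name, opts = parseTag("metadata")
//	// name == "metadata"; opts is empty, so Contains always reports false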
277 vendor/github.com/ghodss/yaml/yaml.go (generated, vendored, new file)
@@ -0,0 +1,277 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Marshals the object into JSON then converts JSON to YAML and returns the
|
||||
// YAML.
|
||||
func Marshal(o interface{}) ([]byte, error) {
|
||||
j, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
|
||||
}
|
||||
|
||||
y, err := JSONToYAML(j)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
|
||||
}
|
||||
|
||||
return y, nil
|
||||
}
|
||||
|
||||
// Converts YAML to JSON then uses JSON to unmarshal into an object.
|
||||
func Unmarshal(y []byte, o interface{}) error {
|
||||
vo := reflect.ValueOf(o)
|
||||
j, err := yamlToJSON(y, &vo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error converting YAML to JSON: %v", err)
|
||||
}
|
||||
|
||||
err = json.Unmarshal(j, o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error unmarshaling JSON: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
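// Illustrative sketch (not part of the upstream source): typical use of
// Marshal and Unmarshal with a hypothetical Config type. Because Marshal goes
// struct -> JSON -> YAML, the emitted keys come from the json struct tags and
// go-yaml writes map keys in sorted order.
//
//	type Config struct {
//		Name     string `json:"name"`
//		Replicas int    `json:"replicas"`
//	}
//
//	y, err := Marshal(Config{Name: "web", Replicas: 3})
//	// err == nil; y contains the lines "name: web" and "replicas: 3"
//
//	var c Config
//	err = Unmarshal([]byte("name: db\nreplicas: 5\n"), &c)
//	// err == nil; c.Name == "db", c.Replicas == 5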
|
||||
|
||||
// Convert JSON to YAML.
|
||||
func JSONToYAML(j []byte) ([]byte, error) {
|
||||
// Convert the JSON to an object.
|
||||
var jsonObj interface{}
|
||||
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
|
||||
// Go JSON library doesn't try to pick the right number type (int, float,
|
||||
// etc.) when unmarshalling to interface{}, it just picks float64
|
||||
// universally. go-yaml does go through the effort of picking the right
|
||||
// number type, so we can preserve number type throughout this process.
|
||||
err := yaml.Unmarshal(j, &jsonObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Marshal this object into YAML.
|
||||
return yaml.Marshal(jsonObj)
|
||||
}
|
||||
|
||||
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
|
||||
// this method should be a no-op.
|
||||
//
|
||||
// Things YAML can do that are not supported by JSON:
|
||||
// * In YAML you can have binary and null keys in your maps. These are invalid
|
||||
// in JSON. (int and float keys are converted to strings.)
|
||||
// * Binary data in YAML with the !!binary tag is not supported. If you want to
|
||||
// use binary data with this library, encode the data as base64 as usual but do
|
||||
// not use the !!binary tag in your YAML. This will ensure the original base64
|
||||
// encoded data makes it all the way through to the JSON.
|
||||
func YAMLToJSON(y []byte) ([]byte, error) {
|
||||
return yamlToJSON(y, nil)
|
||||
}
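// Illustrative sketch (not part of the upstream source): converting between
// the two encodings without a Go struct; the documents are arbitrary examples.
//
//	j, _ := YAMLToJSON([]byte("a: 1\nb: true\nc: [x, y]\n"))
//	// j is {"a":1,"b":true,"c":["x","y"]} (json.Marshal sorts map keys)
//
//	y, _ := JSONToYAML([]byte(`{"a":1,"b":true}`))
//	// y is "a: 1\nb: true\n" (go-yaml also emits keys in sorted order)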
|
||||
|
||||
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
|
||||
// Convert the YAML to an object.
|
||||
var yamlObj interface{}
|
||||
err := yaml.Unmarshal(y, &yamlObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// YAML objects are not completely compatible with JSON objects (e.g. you
|
||||
// can have non-string keys in YAML). So, convert the YAML-compatible object
|
||||
// to a JSON-compatible object, failing with an error if irrecoverable
|
||||
// incompatibilities happen along the way.
|
||||
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert this object to JSON and return the data.
|
||||
return json.Marshal(jsonObj)
|
||||
}
|
||||
|
||||
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
|
||||
var err error
|
||||
|
||||
// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
|
||||
// interface). We pass decodingNull as false because we're not actually
|
||||
// decoding into the value, we're just checking if the ultimate target is a
|
||||
// string.
|
||||
if jsonTarget != nil {
|
||||
ju, tu, pv := indirect(*jsonTarget, false)
|
||||
// We have a JSON or Text Unmarshaler at this level, so we can't be trying
|
||||
// to decode into a string.
|
||||
if ju != nil || tu != nil {
|
||||
jsonTarget = nil
|
||||
} else {
|
||||
jsonTarget = &pv
|
||||
}
|
||||
}
|
||||
|
||||
// If yamlObj is a number or a boolean, check if jsonTarget is a string -
|
||||
// if so, coerce. Else return normal.
|
||||
// If yamlObj is a map or array, find the field that each key is
|
||||
// unmarshaling to, and when you recurse pass the reflect.Value for that
|
||||
// field back into this function.
|
||||
switch typedYAMLObj := yamlObj.(type) {
|
||||
case map[interface{}]interface{}:
|
||||
// JSON does not support arbitrary keys in a map, so we must convert
|
||||
// these keys to strings.
|
||||
//
|
||||
// From my reading of go-yaml v2 (specifically the resolve function),
|
||||
// keys can only have the types string, int, int64, float64, binary
|
||||
// (unsupported), or null (unsupported).
|
||||
strMap := make(map[string]interface{})
|
||||
for k, v := range typedYAMLObj {
|
||||
// Resolve the key to a string first.
|
||||
var keyString string
|
||||
switch typedKey := k.(type) {
|
||||
case string:
|
||||
keyString = typedKey
|
||||
case int:
|
||||
keyString = strconv.Itoa(typedKey)
|
||||
case int64:
|
||||
// go-yaml will only return an int64 as a key if the system
|
||||
// architecture is 32-bit and the key's value is between 32-bit
|
||||
// and 64-bit. Otherwise the key type will simply be int.
|
||||
keyString = strconv.FormatInt(typedKey, 10)
|
||||
case float64:
|
||||
// Stolen from go-yaml to use the same conversion to string as
|
||||
// the go-yaml library uses to convert float to string when
|
||||
// Marshaling.
|
||||
s := strconv.FormatFloat(typedKey, 'g', -1, 32)
|
||||
switch s {
|
||||
case "+Inf":
|
||||
s = ".inf"
|
||||
case "-Inf":
|
||||
s = "-.inf"
|
||||
case "NaN":
|
||||
s = ".nan"
|
||||
}
|
||||
keyString = s
|
||||
case bool:
|
||||
if typedKey {
|
||||
keyString = "true"
|
||||
} else {
|
||||
keyString = "false"
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
|
||||
reflect.TypeOf(k), k, v)
|
||||
}
|
||||
|
||||
// jsonTarget should be a struct or a map. If it's a struct, find
|
||||
// the field it's going to map to and pass its reflect.Value. If
|
||||
// it's a map, find the element type of the map and pass the
|
||||
// reflect.Value created from that type. If it's neither, just pass
|
||||
// nil - JSON conversion will error for us if it's a real issue.
|
||||
if jsonTarget != nil {
|
||||
t := *jsonTarget
|
||||
if t.Kind() == reflect.Struct {
|
||||
keyBytes := []byte(keyString)
|
||||
// Find the field that the JSON library would use.
|
||||
var f *field
|
||||
fields := cachedTypeFields(t.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if bytes.Equal(ff.nameBytes, keyBytes) {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
// Do case-insensitive comparison.
|
||||
if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
// Find the reflect.Value of the most preferential
|
||||
// struct field.
|
||||
jtf := t.Field(f.index[0])
|
||||
strMap[keyString], err = convertToJSONableObject(v, &jtf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
} else if t.Kind() == reflect.Map {
|
||||
// Create a zero value of the map's element type to use as
|
||||
// the JSON target.
|
||||
jtv := reflect.Zero(t.Type().Elem())
|
||||
strMap[keyString], err = convertToJSONableObject(v, &jtv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
strMap[keyString], err = convertToJSONableObject(v, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return strMap, nil
|
||||
case []interface{}:
|
||||
// We need to recurse into arrays in case there are any
|
||||
// map[interface{}]interface{}'s inside and to convert any
|
||||
// numbers to strings.
|
||||
|
||||
// If jsonTarget is a slice (which it really should be), find the
|
||||
// thing it's going to map to. If it's not a slice, just pass nil
|
||||
// - JSON conversion will error for us if it's a real issue.
|
||||
var jsonSliceElemValue *reflect.Value
|
||||
if jsonTarget != nil {
|
||||
t := *jsonTarget
|
||||
if t.Kind() == reflect.Slice {
|
||||
// By default slices point to nil, but we need a reflect.Value
|
||||
// pointing to a value of the slice type, so we create one here.
|
||||
ev := reflect.Indirect(reflect.New(t.Type().Elem()))
|
||||
jsonSliceElemValue = &ev
|
||||
}
|
||||
}
|
||||
|
||||
// Make and use a new array.
|
||||
arr := make([]interface{}, len(typedYAMLObj))
|
||||
for i, v := range typedYAMLObj {
|
||||
arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return arr, nil
|
||||
default:
|
||||
// If the target type is a string and the YAML type is a number,
|
||||
// convert the YAML type to a string.
|
||||
if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
|
||||
// Based on my reading of go-yaml, it may return int, int64,
|
||||
// float64, or uint64.
|
||||
var s string
|
||||
switch typedVal := typedYAMLObj.(type) {
|
||||
case int:
|
||||
s = strconv.FormatInt(int64(typedVal), 10)
|
||||
case int64:
|
||||
s = strconv.FormatInt(typedVal, 10)
|
||||
case float64:
|
||||
s = strconv.FormatFloat(typedVal, 'g', -1, 32)
|
||||
case uint64:
|
||||
s = strconv.FormatUint(typedVal, 10)
|
||||
case bool:
|
||||
if typedVal {
|
||||
s = "true"
|
||||
} else {
|
||||
s = "false"
|
||||
}
|
||||
}
|
||||
if len(s) > 0 {
|
||||
yamlObj = interface{}(s)
|
||||
}
|
||||
}
|
||||
return yamlObj, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
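// Illustrative sketch (not part of the upstream source): the key coercion
// above is what lets YAML maps with non-string keys survive the trip to JSON.
//
//	j, _ := YAMLToJSON([]byte("1: a\n2.5: b\ntrue: c\n"))
//	// j is {"1":"a","2.5":"b","true":"c"}: int, float and bool keys become
//	// JSON strings, while binary or null keys make yamlToJSON return an error.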
1 vendor/github.com/go-openapi/analysis/.drone.sec (generated, vendored, new file)
@@ -0,0 +1 @@
|
||||
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.gU9bGHDmyTBt_nmOh7gxp_8MUeL4ckrzh3yys59Crqypnp0Rq52Q4B7T8_4u4elt-KF3jwHXfIjTWByyce8yS6FiYfKPVuCF_7qbSC5ES84Hqs0uigZP5qGFFYXC5EZlSw7giMgLrqfjbOLPhCpnTlWjfvlRssVbvWx4I6eXQEp5EaymJn2K-pgbKLaMpgkFcDUGxDeIkK9X9XQ2sYicoRPdRZgIZfMXBL05hphlCorXp1TCNAYaoI3qDmerzDGcT5YzIRKrlW2TUEy0EI4iySCbTpdlWhj3S1mt03P9Hmo8TkTfAG2Eu6XYs59RoqXCoGZNXHfsPqV5hyLpINNSzg.4VNQ5QHe2Ig7Giam.1fN53mPplTSOg0Mr8fwNH6FVjf8DNc-YyHhaET5IK4LeY0FPoyQZIjEEIqXAgzdKJ7uNfjf_dqLe2hRd-QqYjPacIHqI8FHWTqsDHC9maL3gouDxHZ3TsYVtCnO5iXrqZXpSWjDjEHKR3PQdUbBEIpEDBgkhkAN2eUHZuD1Hjy65SMWNX0eQ2CbIEgcPxHnoeGXx8k1c0VHZuXecuYYJPyGG88UWQD0aIusIR99za5cIIflT7pfvXQMuLLCcy5Y6RWkzLaNBg7R1GOZvCiOi7jmjYSKMdIxHELC4uO37n_UenvE3KWqjt9i82jbq6XMNoN6Gcwq0Qwl7PN1rtJRK6tlczm0G24Tq8t94cDLmEz2AAHdQ9T0d7rz3hS66BK4h49D_1HYoq1ZQ9lOT_Ph0TtVFjd0-_wR3k5h5A1_0azFRt_udYbn_v7-Wbga8CjGnaIpHz5hWTrutP4euorJAyyiANnOUVDeJNZYX4D-zdjT4Yoplk0mU5zo4Uo-oKDS_Nirr71uaZHcvh3jlryi0vMiDCg61CI1i1ulHeTiT65G2zDR4byOcL4cCa_vTRnmjK-2I3arPyYQfDVmXicKo8pP8RghdQ9c8ad5XkTbxOodcONU_yVqoKr8JQDuS6FNx3ck.0btSTYMPtKCu86bQsBX5Ew
|
36 vendor/github.com/go-openapi/analysis/.drone.yml (generated, vendored, new file)
@@ -0,0 +1,36 @@
|
||||
clone:
|
||||
path: github.com/go-openapi/analysis
|
||||
|
||||
matrix:
|
||||
GO_VERSION:
|
||||
- "1.6"
|
||||
|
||||
build:
|
||||
integration:
|
||||
image: golang:$$GO_VERSION
|
||||
pull: true
|
||||
commands:
|
||||
- go get -u github.com/stretchr/testify/assert
|
||||
- go get -u gopkg.in/yaml.v2
|
||||
- go get -u github.com/go-openapi/swag
|
||||
- go get -u github.com/go-openapi/jsonpointer
|
||||
- go get -u github.com/go-openapi/spec
|
||||
- go get -u github.com/go-openapi/loads/fmts
|
||||
- go test -race ./...
|
||||
- go test -v -cover -coverprofile=coverage.out -covermode=count ./...
|
||||
|
||||
notify:
|
||||
slack:
|
||||
channel: bots
|
||||
webhook_url: $$SLACK_URL
|
||||
username: drone
|
||||
|
||||
publish:
|
||||
coverage:
|
||||
server: https://coverage.vmware.run
|
||||
token: $$GITHUB_TOKEN
|
||||
# threshold: 70
|
||||
# must_increase: true
|
||||
when:
|
||||
matrix:
|
||||
GO_VERSION: "1.6"
|
2 vendor/github.com/go-openapi/analysis/.gitignore (generated, vendored, new file)
@@ -0,0 +1,2 @@
|
||||
secrets.yml
|
||||
coverage.out
|
12 vendor/github.com/go-openapi/analysis/.pullapprove.yml (generated, vendored, new file)
@@ -0,0 +1,12 @@
|
||||
approve_by_comment: true
|
||||
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
|
||||
reject_regex: ^[Rr]ejected
|
||||
reset_on_push: false
|
||||
reviewers:
|
||||
members:
|
||||
- casualjim
|
||||
- frapposelli
|
||||
- vburenin
|
||||
- pytlesk4
|
||||
name: pullapprove
|
||||
required: 1
|
74 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md (generated, vendored, new file)
@@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202 vendor/github.com/go-openapi/analysis/LICENSE (generated, vendored, new file)
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
6 vendor/github.com/go-openapi/analysis/README.md (generated, vendored, new file)
@@ -0,0 +1,6 @@
|
||||
# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||
|
||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis)
|
||||
|
||||
|
||||
A foundational library to analyze an OAI specification document for easier reasoning about the content.
|
614 vendor/github.com/go-openapi/analysis/analyzer.go (generated, vendored, new file)
@@ -0,0 +1,614 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
slashpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
type referenceAnalysis struct {
|
||||
schemas map[string]spec.Ref
|
||||
responses map[string]spec.Ref
|
||||
parameters map[string]spec.Ref
|
||||
items map[string]spec.Ref
|
||||
allRefs map[string]spec.Ref
|
||||
referenced struct {
|
||||
schemas map[string]SchemaRef
|
||||
responses map[string]*spec.Response
|
||||
parameters map[string]*spec.Parameter
|
||||
}
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
|
||||
r.allRefs["#"+key] = ref
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
|
||||
r.items["#"+key] = items.Ref
|
||||
r.addRef(key, items.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
|
||||
r.schemas["#"+key] = ref.Schema.Ref
|
||||
r.addRef(key, ref.Schema.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
|
||||
r.responses["#"+key] = resp.Ref
|
||||
r.addRef(key, resp.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
|
||||
r.parameters["#"+key] = param.Ref
|
||||
r.addRef(key, param.Ref)
|
||||
}
|
||||
|
||||
// New takes a swagger spec object and returns an analyzed spec document.
|
||||
// The analyzed document contains a number of indices that make it easier to
|
||||
// reason about semantics of a swagger specification for use in code generation
|
||||
// or validation etc.
|
||||
func New(doc *spec.Swagger) *Spec {
|
||||
a := &Spec{
|
||||
spec: doc,
|
||||
consumes: make(map[string]struct{}, 150),
|
||||
produces: make(map[string]struct{}, 150),
|
||||
authSchemes: make(map[string]struct{}, 150),
|
||||
operations: make(map[string]map[string]*spec.Operation, 150),
|
||||
allSchemas: make(map[string]SchemaRef, 150),
|
||||
allOfs: make(map[string]SchemaRef, 150),
|
||||
references: referenceAnalysis{
|
||||
schemas: make(map[string]spec.Ref, 150),
|
||||
responses: make(map[string]spec.Ref, 150),
|
||||
parameters: make(map[string]spec.Ref, 150),
|
||||
items: make(map[string]spec.Ref, 150),
|
||||
allRefs: make(map[string]spec.Ref, 150),
|
||||
},
|
||||
}
|
||||
a.references.referenced.schemas = make(map[string]SchemaRef, 150)
|
||||
a.references.referenced.responses = make(map[string]*spec.Response, 150)
|
||||
a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
|
||||
a.initialize()
|
||||
return a
|
||||
}
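// Illustrative sketch (not part of the upstream source): from an importing
// package, one common way to obtain a *spec.Swagger is
// github.com/go-openapi/loads. The file name and the "/pets/{id}" path below
// are hypothetical and assume the spec defines a GET operation for that path.
//
//	doc, err := loads.Spec("swagger.json")
//	if err != nil {
//		log.Fatal(err)
//	}
//	an := analysis.New(doc.Spec())
//	fmt.Println(an.RequiredConsumes())          // distinct consumes mediatypes
//	params := an.ParamsFor("GET", "/pets/{id}") // aggregated path + operation params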
|
||||
|
||||
// Spec takes a swagger spec object and turns it into a registry
|
||||
// with a bunch of utility methods to act on the information in the spec
|
||||
type Spec struct {
|
||||
spec *spec.Swagger
|
||||
consumes map[string]struct{}
|
||||
produces map[string]struct{}
|
||||
authSchemes map[string]struct{}
|
||||
operations map[string]map[string]*spec.Operation
|
||||
references referenceAnalysis
|
||||
allSchemas map[string]SchemaRef
|
||||
allOfs map[string]SchemaRef
|
||||
}
|
||||
|
||||
func (s *Spec) initialize() {
|
||||
for _, c := range s.spec.Consumes {
|
||||
s.consumes[c] = struct{}{}
|
||||
}
|
||||
for _, c := range s.spec.Produces {
|
||||
s.produces[c] = struct{}{}
|
||||
}
|
||||
for _, ss := range s.spec.Security {
|
||||
for k := range ss {
|
||||
s.authSchemes[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
for path, pathItem := range s.AllPaths() {
|
||||
s.analyzeOperations(path, &pathItem)
|
||||
}
|
||||
|
||||
for name, parameter := range s.spec.Parameters {
|
||||
refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
|
||||
if parameter.Items != nil {
|
||||
s.analyzeItems("items", parameter.Items, refPref)
|
||||
}
|
||||
if parameter.In == "body" && parameter.Schema != nil {
|
||||
s.analyzeSchema("schema", *parameter.Schema, refPref)
|
||||
}
|
||||
}
|
||||
|
||||
for name, response := range s.spec.Responses {
|
||||
refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
|
||||
for _, v := range response.Headers {
|
||||
if v.Items != nil {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
}
|
||||
if response.Schema != nil {
|
||||
s.analyzeSchema("schema", *response.Schema, refPref)
|
||||
}
|
||||
}
|
||||
|
||||
for name, schema := range s.spec.Definitions {
|
||||
s.analyzeSchema(name, schema, "/definitions")
|
||||
}
|
||||
// TODO: after analyzing all things and flattening schemas etc
|
||||
// resolve all the collected references to their final representations
|
||||
// best put in a separate method because this could get expensive
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
|
||||
// TODO: resolve refs here?
|
||||
op := pi
|
||||
s.analyzeOperation("GET", path, op.Get)
|
||||
s.analyzeOperation("PUT", path, op.Put)
|
||||
s.analyzeOperation("POST", path, op.Post)
|
||||
s.analyzeOperation("PATCH", path, op.Patch)
|
||||
s.analyzeOperation("DELETE", path, op.Delete)
|
||||
s.analyzeOperation("HEAD", path, op.Head)
|
||||
s.analyzeOperation("OPTIONS", path, op.Options)
|
||||
for i, param := range op.Parameters {
|
||||
refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
|
||||
if param.Ref.String() != "" {
|
||||
s.references.addParamRef(refPref, ¶m)
|
||||
}
|
||||
if param.Items != nil {
|
||||
s.analyzeItems("items", param.Items, refPref)
|
||||
}
|
||||
if param.Schema != nil {
|
||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
|
||||
if items == nil {
|
||||
return
|
||||
}
|
||||
refPref := slashpath.Join(prefix, name)
|
||||
s.analyzeItems(name, items.Items, refPref)
|
||||
if items.Ref.String() != "" {
|
||||
s.references.addItemsRef(refPref, items)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
|
||||
if op == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, c := range op.Consumes {
|
||||
s.consumes[c] = struct{}{}
|
||||
}
|
||||
for _, c := range op.Produces {
|
||||
s.produces[c] = struct{}{}
|
||||
}
|
||||
for _, ss := range op.Security {
|
||||
for k := range ss {
|
||||
s.authSchemes[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
if _, ok := s.operations[method]; !ok {
|
||||
s.operations[method] = make(map[string]*spec.Operation)
|
||||
}
|
||||
s.operations[method][path] = op
|
||||
prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
|
||||
for i, param := range op.Parameters {
|
||||
refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
|
||||
if param.Ref.String() != "" {
|
||||
s.references.addParamRef(refPref, ¶m)
|
||||
}
|
||||
s.analyzeItems("items", param.Items, refPref)
|
||||
if param.In == "body" && param.Schema != nil {
|
||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||
}
|
||||
}
|
||||
if op.Responses != nil {
|
||||
if op.Responses.Default != nil {
|
||||
refPref := slashpath.Join(prefix, "responses", "default")
|
||||
if op.Responses.Default.Ref.String() != "" {
|
||||
s.references.addResponseRef(refPref, op.Responses.Default)
|
||||
}
|
||||
for _, v := range op.Responses.Default.Headers {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
if op.Responses.Default.Schema != nil {
|
||||
s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
|
||||
}
|
||||
}
|
||||
for k, res := range op.Responses.StatusCodeResponses {
|
||||
refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
|
||||
if res.Ref.String() != "" {
|
||||
s.references.addResponseRef(refPref, &res)
|
||||
}
|
||||
for _, v := range res.Headers {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
if res.Schema != nil {
|
||||
s.analyzeSchema("schema", *res.Schema, refPref)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
|
||||
refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
|
||||
schRef := SchemaRef{
|
||||
Name: name,
|
||||
Schema: &schema,
|
||||
Ref: spec.MustCreateRef("#" + refURI),
|
||||
}
|
||||
s.allSchemas["#"+refURI] = schRef
|
||||
if schema.Ref.String() != "" {
|
||||
s.references.addSchemaRef(refURI, schRef)
|
||||
}
|
||||
for k, v := range schema.Definitions {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
|
||||
}
|
||||
for k, v := range schema.Properties {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
|
||||
}
|
||||
for k, v := range schema.PatternProperties {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
|
||||
}
|
||||
for i, v := range schema.AllOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
|
||||
}
|
||||
if len(schema.AllOf) > 0 {
|
||||
s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
|
||||
}
|
||||
for i, v := range schema.AnyOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
|
||||
}
|
||||
for i, v := range schema.OneOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
|
||||
}
|
||||
if schema.Not != nil {
|
||||
s.analyzeSchema("not", *schema.Not, refURI)
|
||||
}
|
||||
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
|
||||
s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
|
||||
}
|
||||
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
|
||||
s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
|
||||
}
|
||||
if schema.Items != nil {
|
||||
if schema.Items.Schema != nil {
|
||||
s.analyzeSchema("items", *schema.Items.Schema, refURI)
|
||||
}
|
||||
for i, sch := range schema.Items.Schemas {
|
||||
s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
|
||||
}
|
||||
}
|
||||
}
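// Illustrative sketch (not part of the upstream source): the JSON-pointer
// style keys built above, for a hypothetical definitions entry
//
//	definitions:
//	  Pet:
//	    properties:
//	      name: {type: string}
//
// analyzeSchema registers "#/definitions/Pet" and
// "#/definitions/Pet/properties/name" in allSchemas; only schemas whose own
// Ref is non-empty are additionally recorded in references.schemas.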
|
||||
|
||||
// SecurityRequirement is a representation of a security requirement for an operation
|
||||
type SecurityRequirement struct {
|
||||
Name string
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// SecurityRequirementsFor gets the security requirements for the operation
|
||||
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
|
||||
if s.spec.Security == nil && operation.Security == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
schemes := s.spec.Security
|
||||
if operation.Security != nil {
|
||||
schemes = operation.Security
|
||||
}
|
||||
|
||||
unique := make(map[string]SecurityRequirement)
|
||||
for _, scheme := range schemes {
|
||||
for k, v := range scheme {
|
||||
if _, ok := unique[k]; !ok {
|
||||
unique[k] = SecurityRequirement{Name: k, Scopes: v}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var result []SecurityRequirement
|
||||
for _, v := range unique {
|
||||
result = append(result, v)
|
||||
}
|
||||
return result
|
||||
}
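// Illustrative sketch (not part of the upstream source): operation-level
// security replaces document-level security when present. With a hypothetical
// document declaring security [{petstore_auth: [read]}] and an operation op
// declaring security [{api_key: []}]:
//
//	reqs := s.SecurityRequirementsFor(op)
//	// reqs == []SecurityRequirement{{Name: "api_key", Scopes: []string{}}}
//	// If op.Security were nil, the document-level requirement
//	// {Name: "petstore_auth", Scopes: []string{"read"}} would be returned.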
|
||||
|
||||
// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
|
||||
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
|
||||
requirements := s.SecurityRequirementsFor(operation)
|
||||
if len(requirements) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make(map[string]spec.SecurityScheme)
|
||||
for _, v := range requirements {
|
||||
if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
|
||||
if definition != nil {
|
||||
result[v.Name] = *definition
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ConsumesFor gets the mediatypes for the operation
|
||||
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
|
||||
|
||||
if len(operation.Consumes) == 0 {
|
||||
cons := make(map[string]struct{}, len(s.spec.Consumes))
|
||||
for _, k := range s.spec.Consumes {
|
||||
cons[k] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(cons)
|
||||
}
|
||||
|
||||
cons := make(map[string]struct{}, len(operation.Consumes))
|
||||
for _, c := range operation.Consumes {
|
||||
cons[c] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(cons)
|
||||
}
|
||||
|
||||
// ProducesFor gets the mediatypes for the operation
|
||||
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
|
||||
if len(operation.Produces) == 0 {
|
||||
prod := make(map[string]struct{}, len(s.spec.Produces))
|
||||
for _, k := range s.spec.Produces {
|
||||
prod[k] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(prod)
|
||||
}
|
||||
|
||||
prod := make(map[string]struct{}, len(operation.Produces))
|
||||
for _, c := range operation.Produces {
|
||||
prod[c] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(prod)
|
||||
}
|
||||
|
||||
func mapKeyFromParam(param *spec.Parameter) string {
|
||||
return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
|
||||
}
|
||||
|
||||
func fieldNameFromParam(param *spec.Parameter) string {
|
||||
if nm, ok := param.Extensions.GetString("go-name"); ok {
|
||||
return nm
|
||||
}
|
||||
return swag.ToGoName(param.Name)
|
||||
}
|
||||
|
||||
func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
|
||||
for _, param := range parameters {
|
||||
pr := param
|
||||
if pr.Ref.String() != "" {
|
||||
obj, _, err := pr.Ref.GetPointer().Get(s.spec)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pr = obj.(spec.Parameter)
|
||||
}
|
||||
res[mapKeyFromParam(&pr)] = pr
|
||||
}
|
||||
}
|
||||
|
||||
// ParametersFor the specified operation id
|
||||
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
|
||||
gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
|
||||
bag := make(map[string]spec.Parameter)
|
||||
s.paramsAsMap(pi.Parameters, bag)
|
||||
s.paramsAsMap(op.Parameters, bag)
|
||||
|
||||
var res []spec.Parameter
|
||||
for _, v := range bag {
|
||||
res = append(res, v)
|
||||
}
|
||||
return res
|
||||
}
|
||||
for _, pi := range s.spec.Paths.Paths {
|
||||
if pi.Get != nil && pi.Get.ID == operationID {
|
||||
return gatherParams(&pi, pi.Get)
|
||||
}
|
||||
if pi.Head != nil && pi.Head.ID == operationID {
|
||||
return gatherParams(&pi, pi.Head)
|
||||
}
|
||||
if pi.Options != nil && pi.Options.ID == operationID {
|
||||
return gatherParams(&pi, pi.Options)
|
||||
}
|
||||
if pi.Post != nil && pi.Post.ID == operationID {
|
||||
return gatherParams(&pi, pi.Post)
|
||||
}
|
||||
if pi.Patch != nil && pi.Patch.ID == operationID {
|
||||
return gatherParams(&pi, pi.Patch)
|
||||
}
|
||||
if pi.Put != nil && pi.Put.ID == operationID {
|
||||
return gatherParams(&pi, pi.Put)
|
||||
}
|
||||
if pi.Delete != nil && pi.Delete.ID == operationID {
|
||||
return gatherParams(&pi, pi.Delete)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
|
||||
// apply for the method and path.
|
||||
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
|
||||
res := make(map[string]spec.Parameter)
|
||||
if pi, ok := s.spec.Paths.Paths[path]; ok {
|
||||
s.paramsAsMap(pi.Parameters, res)
|
||||
s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// OperationForName gets the operation for the given id
|
||||
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
|
||||
for method, pathItem := range s.operations {
|
||||
for path, op := range pathItem {
|
||||
if operationID == op.ID {
|
||||
return method, path, op, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", "", nil, false
|
||||
}
|
||||
|
||||
// OperationFor the given method and path
|
||||
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
|
||||
if mp, ok := s.operations[strings.ToUpper(method)]; ok {
|
||||
op, fn := mp[path]
|
||||
return op, fn
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Operations gathers all the operations specified in the spec document
|
||||
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
|
||||
return s.operations
|
||||
}
|
||||
|
||||
func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
|
||||
if len(mp) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]string, 0, len(mp))
|
||||
for k := range mp {
|
||||
result = append(result, k)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AllPaths returns all the paths in the swagger spec
|
||||
func (s *Spec) AllPaths() map[string]spec.PathItem {
|
||||
if s.spec == nil || s.spec.Paths == nil {
|
||||
return nil
|
||||
}
|
||||
return s.spec.Paths.Paths
|
||||
}
|
||||
|
||||
// OperationIDs gets all the operation ids based on method and path
|
||||
func (s *Spec) OperationIDs() []string {
|
||||
if len(s.operations) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make([]string, 0, len(s.operations))
|
||||
for method, v := range s.operations {
|
||||
for p, o := range v {
|
||||
if o.ID != "" {
|
||||
result = append(result, o.ID)
|
||||
} else {
|
||||
result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// RequiredConsumes gets all the distinct consumes that are specified in the specification document
|
||||
func (s *Spec) RequiredConsumes() []string {
|
||||
return s.structMapKeys(s.consumes)
|
||||
}
|
||||
|
||||
// RequiredProduces gets all the distinct produces that are specified in the specification document
|
||||
func (s *Spec) RequiredProduces() []string {
|
||||
return s.structMapKeys(s.produces)
|
||||
}
|
||||
|
||||
// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
|
||||
func (s *Spec) RequiredSecuritySchemes() []string {
|
||||
return s.structMapKeys(s.authSchemes)
|
||||
}
|
||||
|
||||
// SchemaRef is a reference to a schema
|
||||
type SchemaRef struct {
|
||||
Name string
|
||||
Ref spec.Ref
|
||||
Schema *spec.Schema
|
||||
}
|
||||
|
||||
// SchemasWithAllOf returns schema references to all schemas that are defined
|
||||
// with an allOf key
|
||||
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
|
||||
for _, v := range s.allOfs {
|
||||
result = append(result, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllDefinitions returns schema references for all the definitions that were discovered
|
||||
func (s *Spec) AllDefinitions() (result []SchemaRef) {
|
||||
for _, v := range s.allSchemas {
|
||||
result = append(result, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllDefinitionReferences returns json refs for all the discovered schemas
|
||||
func (s *Spec) AllDefinitionReferences() (result []string) {
|
||||
for _, v := range s.references.schemas {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllParameterReferences returns json refs for all the discovered parameters
|
||||
func (s *Spec) AllParameterReferences() (result []string) {
|
||||
for _, v := range s.references.parameters {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllResponseReferences returns json refs for all the discovered responses
|
||||
func (s *Spec) AllResponseReferences() (result []string) {
|
||||
for _, v := range s.references.responses {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllItemsReferences returns the references for all the items
|
||||
func (s *Spec) AllItemsReferences() (result []string) {
|
||||
for _, v := range s.references.items {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllReferences returns all the references found in the document
|
||||
func (s *Spec) AllReferences() (result []string) {
|
||||
for _, v := range s.references.allRefs {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllRefs returns all the unique references found in the document
|
||||
func (s *Spec) AllRefs() (result []spec.Ref) {
|
||||
set := make(map[string]struct{})
|
||||
for _, v := range s.references.allRefs {
|
||||
a := v.String()
|
||||
if a == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := set[a]; !ok {
|
||||
set[a] = struct{}{}
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
1 vendor/github.com/go-openapi/loads/.drone.sec generated vendored Normal file
@@ -0,0 +1 @@
|
||||
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.xUjixvmMMeampw0Doyr_XLvcV5ICmDgDFmlcWqgmO84O3Hwn6dqbMkwOjpKMOyEJW_98b5Om5ED59IFt2S0T_OarlrsJL8jOK5fqxSMNXy2w8LfI-e5l1URverW41ofAVK8m9wK05q2BSJM_M6PyyODaQeDBiCVK1HreMZBlXpuUDVtBMPILQoga0eSZOsTR3DYEpZIS0A0Rsa5yIhMYR5d5-JMYqbqOCB7tNJ-BM83OzYgL7Hrz0J15kqaJmhQ-GJoMJDzOemSO9KxLCOfSPp11R_G3Mfd48xYnuiRuPOTakbOCLxuYviH6uoGVIOhnMyY9qKiDKbOn4BQUi1-igA.6qjQzq9nzAxRRKV_.z79R5cMFAEuEaAh6U9ykiL8oIqzMbs_I2C-hSFRh3HYRJ4fTB-9LrcbF0uASIOq7bBn4OQzW-0QFwYOs1uaawmrByGngV5d0afiZf_LBKcmTF2vtxRi_A_nxD-EHoPmh3lKBU5WNDe_8kLjEeS89HeyyFPuv5iQbqhzdqPFohHKVigwVqVYYLjB8GWQ4t7tC4c8l5rHanaXf71W0e3op2m8bebpZL0JPGhnULVA1oU27TYeLsO112JkIYtBwZxzvAs--bBFoKeGJWVMFzrKN68UACGZ9RFw0uGJbBmVC4-jRuIc6XpqeEqw3KG-rjFzkeEor3575qW-8kiXYqpub9SFUc3SSZkxJ8hB3SrnMBOuDUSenrXNpAbltmV3KAALzN3_bMBQuihwSRIn0Hg7-Dpni8BieMe44RMDvRu6p_71aeU_KW4V7Umy_h8gpIvQFuKGdTQH2ahsyCXL0ojqjMbVMdoWpDQTQ2_Fy8Qt_p2kJ8BgDo-1Akd4a6BNU2NGqsdnrJmtVKcTqLBadf9ylCwxHdGVrtNYORALSms2T6Q1s-poQnMjIwN8lnUD8ABUBpt4uVtrYkiWPVwrwywLQeiHhR-pboe_53kWDAx4Hy4rpbKsaxanYhy_bEbAYKb3aIUA.75GD4kRBCQdcGFYP1QYdCg
|
39 vendor/github.com/go-openapi/loads/.drone.yml generated vendored Normal file
@@ -0,0 +1,39 @@
|
||||
clone:
|
||||
path: github.com/go-openapi/loads
|
||||
|
||||
matrix:
|
||||
GO_VERSION:
|
||||
- "1.6"
|
||||
|
||||
build:
|
||||
integration:
|
||||
image: golang:$$GO_VERSION
|
||||
pull: true
|
||||
environment:
|
||||
GOCOVMODE: "count"
|
||||
commands:
|
||||
- go get -u github.com/axw/gocov/gocov
|
||||
- go get -u gopkg.in/matm/v1/gocov-html
|
||||
- go get -u github.com/cee-dub/go-junit-report
|
||||
- go get -u github.com/stretchr/testify/assert
|
||||
- go get -u gopkg.in/yaml.v2
|
||||
- go get -u github.com/go-openapi/swag
|
||||
- go get -u github.com/go-openapi/analysis
|
||||
- go get -u github.com/go-openapi/spec
|
||||
- ./hack/build-drone.sh
|
||||
|
||||
notify:
|
||||
slack:
|
||||
channel: bots
|
||||
webhook_url: $$SLACK_URL
|
||||
username: drone
|
||||
|
||||
publish:
|
||||
coverage:
|
||||
server: https://coverage.vmware.run
|
||||
token: $$GITHUB_TOKEN
|
||||
# threshold: 70
|
||||
# must_increase: true
|
||||
when:
|
||||
matrix:
|
||||
GO_VERSION: "1.6"
|
4 vendor/github.com/go-openapi/loads/.gitignore generated vendored Normal file
@@ -0,0 +1,4 @@
|
||||
secrets.yml
|
||||
coverage.out
|
||||
profile.cov
|
||||
profile.out
|
13 vendor/github.com/go-openapi/loads/.pullapprove.yml generated vendored Normal file
@@ -0,0 +1,13 @@
|
||||
approve_by_comment: true
|
||||
approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)'
|
||||
reject_regex: ^[Rr]ejected
|
||||
reset_on_push: false
|
||||
reviewers:
|
||||
members:
|
||||
- casualjim
|
||||
- chancez
|
||||
- frapposelli
|
||||
- vburenin
|
||||
- pytlesk4
|
||||
name: pullapprove
|
||||
required: 1
|
74 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md generated vendored Normal file
@@ -0,0 +1,74 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202 vendor/github.com/go-openapi/loads/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
5 vendor/github.com/go-openapi/loads/README.md generated vendored Normal file
@@ -0,0 +1,5 @@
|
||||
# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||
|
||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
|
||||
|
||||
Loading of OAI specification documents from local or remote locations.
|
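
A minimal usage sketch (editor-added, not part of the upstream README; the `./swagger.json` path is hypothetical):

```go
import "github.com/go-openapi/loads"

doc, err := loads.Spec("./swagger.json")
if err != nil {
	log.Fatal(err)
}
fmt.Println(doc.Version(), doc.BasePath())

// Expanded resolves the $ref pointers into a new document.
expanded, err := doc.Expanded()
if err != nil {
	log.Fatal(err)
}
_ = expanded
```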
203 vendor/github.com/go-openapi/loads/spec.go generated vendored Normal file
@@ -0,0 +1,203 @@
|
||||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package loads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
// JSONDoc loads a json document from either a file or a remote url
|
||||
func JSONDoc(path string) (json.RawMessage, error) {
|
||||
data, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.RawMessage(data), nil
|
||||
}
|
||||
|
||||
// DocLoader represents a doc loader type
|
||||
type DocLoader func(string) (json.RawMessage, error)
|
||||
|
||||
// DocMatcher represents a predicate to check if a loader matches
|
||||
type DocMatcher func(string) bool
|
||||
|
||||
var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}
|
||||
|
||||
// AddLoader for a document
|
||||
func AddLoader(predicate DocMatcher, load DocLoader) {
|
||||
prev := loaders
|
||||
loaders = &loader{
|
||||
Match: predicate,
|
||||
Fn: load,
|
||||
Next: prev,
|
||||
}
|
||||
|
||||
}
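// Editor's note (usage sketch, not upstream code): AddLoader registers an extra
// document loader in front of the default JSON loader. A common use is wiring a
// YAML loader from github.com/go-openapi/swag, assuming its YAMLMatcher and
// YAMLDoc helpers:
//
//	loads.AddLoader(swag.YAMLMatcher, swag.YAMLDoc)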
|
||||
|
||||
type loader struct {
|
||||
Fn DocLoader
|
||||
Match DocMatcher
|
||||
Next *loader
|
||||
}
|
||||
|
||||
// JSONSpec loads a spec from a json document
|
||||
func JSONSpec(path string) (*Document, error) {
|
||||
data, err := JSONDoc(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// convert to json
|
||||
return Analyzed(json.RawMessage(data), "")
|
||||
}
|
||||
|
||||
// Document represents a swagger spec document
|
||||
type Document struct {
|
||||
// specAnalyzer
|
||||
Analyzer *analysis.Spec
|
||||
spec *spec.Swagger
|
||||
origSpec *spec.Swagger
|
||||
schema *spec.Schema
|
||||
raw json.RawMessage
|
||||
}
|
||||
|
||||
// Spec loads a new spec document
|
||||
func Spec(path string) (*Document, error) {
|
||||
specURL, err := url.Parse(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for l := loaders.Next; l != nil; l = l.Next {
|
||||
if loaders.Match(specURL.Path) {
|
||||
b, err2 := loaders.Fn(path)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
return Analyzed(b, "")
|
||||
}
|
||||
}
|
||||
b, err := loaders.Fn(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Analyzed(b, "")
|
||||
}
|
||||
|
||||
var swag20Schema = spec.MustLoadSwagger20Schema()
|
||||
|
||||
// Analyzed creates a new analyzed spec document
|
||||
func Analyzed(data json.RawMessage, version string) (*Document, error) {
|
||||
if version == "" {
|
||||
version = "2.0"
|
||||
}
|
||||
if version != "2.0" {
|
||||
return nil, fmt.Errorf("spec version %q is not supported", version)
|
||||
}
|
||||
|
||||
swspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(data, swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
origsqspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(data, origsqspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := &Document{
|
||||
Analyzer: analysis.New(swspec),
|
||||
schema: swag20Schema,
|
||||
spec: swspec,
|
||||
raw: data,
|
||||
origSpec: origsqspec,
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Expanded expands the ref fields in the spec document and returns a new spec document
|
||||
func (d *Document) Expanded() (*Document, error) {
|
||||
swspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(d.raw, swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := spec.ExpandSpec(swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dd := &Document{
|
||||
Analyzer: analysis.New(swspec),
|
||||
spec: swspec,
|
||||
schema: swag20Schema,
|
||||
raw: d.raw,
|
||||
origSpec: d.origSpec,
|
||||
}
|
||||
return dd, nil
|
||||
}
|
||||
|
||||
// BasePath the base path for this spec
|
||||
func (d *Document) BasePath() string {
|
||||
return d.spec.BasePath
|
||||
}
|
||||
|
||||
// Version returns the version of this spec
|
||||
func (d *Document) Version() string {
|
||||
return d.spec.Swagger
|
||||
}
|
||||
|
||||
// Schema returns the swagger 2.0 schema
|
||||
func (d *Document) Schema() *spec.Schema {
|
||||
return d.schema
|
||||
}
|
||||
|
||||
// Spec returns the swagger spec object model
|
||||
func (d *Document) Spec() *spec.Swagger {
|
||||
return d.spec
|
||||
}
|
||||
|
||||
// Host returns the host for the API
|
||||
func (d *Document) Host() string {
|
||||
return d.spec.Host
|
||||
}
|
||||
|
||||
// Raw returns the raw swagger spec as json bytes
|
||||
func (d *Document) Raw() json.RawMessage {
|
||||
return d.raw
|
||||
}
|
||||
|
||||
func (d *Document) OrigSpec() *spec.Swagger {
|
||||
return d.origSpec
|
||||
}
|
||||
|
||||
// ResetDefinitions gives a shallow copy with the models reset
|
||||
func (d *Document) ResetDefinitions() *Document {
|
||||
defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
|
||||
for k, v := range d.origSpec.Definitions {
|
||||
defs[k] = v
|
||||
}
|
||||
|
||||
d.spec.Definitions = defs
|
||||
return d
|
||||
}
|
||||
|
||||
// Pristine creates a new pristine document instance based on the input data
|
||||
func (d *Document) Pristine() *Document {
|
||||
dd, _ := Analyzed(d.Raw(), d.Version())
|
||||
return dd
|
||||
}
|
1 vendor/github.com/gophercloud/gophercloud/.gitignore generated vendored Normal file
@@ -0,0 +1 @@
|
||||
**/*.swp
|
19 vendor/github.com/gophercloud/gophercloud/.travis.yml generated vendored Normal file
@@ -0,0 +1,19 @@
|
||||
language: go
|
||||
sudo: false
|
||||
install:
|
||||
- go get golang.org/x/crypto/ssh
|
||||
- go get -v -tags 'fixtures acceptance' ./...
|
||||
- go get github.com/wadey/gocovmerge
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/goimports
|
||||
go:
|
||||
- 1.8
|
||||
- tip
|
||||
env:
|
||||
global:
|
||||
- secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ="
|
||||
script:
|
||||
- ./script/coverage
|
||||
- ./script/format
|
||||
after_success:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out
|
0 vendor/github.com/gophercloud/gophercloud/CHANGELOG.md generated vendored Normal file
148 vendor/github.com/gophercloud/gophercloud/FAQ.md generated vendored Normal file
@@ -0,0 +1,148 @@
|
||||
# Tips
|
||||
|
||||
## Implementing default logging and re-authentication attempts
|
||||
|
||||
You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client
|
||||
like the following and setting it as the provider client's HTTP Client (via the
|
||||
`gophercloud.ProviderClient.HTTPClient` field):
|
||||
|
||||
```go
|
||||
//...
|
||||
|
||||
// LogRoundTripper satisfies the http.RoundTripper interface and is used to
|
||||
// customize the default Gophercloud RoundTripper to allow for logging.
|
||||
type LogRoundTripper struct {
|
||||
rt http.RoundTripper
|
||||
numReauthAttempts int
|
||||
}
|
||||
|
||||
// newHTTPClient return a custom HTTP client that allows for logging relevant
|
||||
// information before and after the HTTP request.
|
||||
func newHTTPClient() http.Client {
|
||||
return http.Client{
|
||||
Transport: &LogRoundTripper{
|
||||
rt: http.DefaultTransport,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// RoundTrip performs a round-trip HTTP request and logs relevant information about it.
|
||||
func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
|
||||
glog.Infof("Request URL: %s\n", request.URL)
|
||||
|
||||
response, err := lrt.rt.RoundTrip(request)
|
||||
if response == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if response.StatusCode == http.StatusUnauthorized {
|
||||
if lrt.numReauthAttempts == 3 {
|
||||
return response, fmt.Errorf("tried to re-authenticate 3 times with no success")
|
||||
}
|
||||
lrt.numReauthAttempts++
|
||||
}
|
||||
|
||||
glog.Debugf("Response Status: %s\n", response.Status)
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
endpoint := "https://127.0.0.1/auth"
|
||||
pc := openstack.NewClient(endpoint)
|
||||
pc.HTTPClient = newHTTPClient()
|
||||
|
||||
//...
|
||||
```
|
||||
|
||||
|
||||
## Implementing custom objects
|
||||
|
||||
OpenStack request/response objects may differ in variable names or types.
|
||||
|
||||
### Custom request objects
|
||||
|
||||
To pass custom options to a request, implement the desired `<ACTION>OptsBuilder` interface. For
|
||||
example, to pass in
|
||||
|
||||
```go
|
||||
type MyCreateServerOpts struct {
|
||||
Name string
|
||||
Size int
|
||||
}
|
||||
```
|
||||
|
||||
to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface:
|
||||
|
||||
```go
|
||||
func (o MyCreateServerOpts) ToServerCreateMap() (map[string]interface{}, error) {
|
||||
return map[string]interface{}{
|
||||
"name": o.Name,
|
||||
"size": o.Size,
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
create an instance of your custom options object, and pass it to `servers.Create`:
|
||||
|
||||
```go
|
||||
// ...
|
||||
myOpts := MyCreateServerOpts{
|
||||
Name: "s1",
|
||||
Size: 100,
|
||||
}
|
||||
server, err := servers.Create(computeClient, myOpts).Extract()
|
||||
// ...
|
||||
```
|
||||
|
||||
### Custom response objects
|
||||
|
||||
Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be
|
||||
combined to create a custom object:
|
||||
|
||||
```go
|
||||
// ...
|
||||
type MyVolume struct {
|
||||
volumes.Volume
|
||||
tenantattr.VolumeExt
|
||||
}
|
||||
|
||||
var v struct {
|
||||
MyVolume `json:"volume"`
|
||||
}
|
||||
|
||||
err := volumes.Get(client, volID).ExtractInto(&v)
|
||||
// ...
|
||||
```
|
||||
|
||||
## Overriding default `UnmarshalJSON` method
|
||||
|
||||
For some response objects, a field may be a custom type or may be allowed to take on
|
||||
different types. In these cases, overriding the default `UnmarshalJSON` method may be
|
||||
necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON`
|
||||
method on the type:
|
||||
|
||||
```go
|
||||
// ...
|
||||
type MyVolume struct {
|
||||
ID string `json:"id"`
|
||||
TimeCreated time.Time `json:"-"`
|
||||
}
|
||||
|
||||
func (r *MyVolume) UnmarshalJSON(b []byte) error {
|
||||
type tmp MyVolume
|
||||
var s struct {
|
||||
tmp
|
||||
TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
|
||||
}
|
||||
err := json.Unmarshal(b, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*r = MyVolume(s.tmp)
|
||||
|
||||
r.TimeCreated = time.Time(s.TimeCreated)
|
||||
|
||||
return err
|
||||
}
|
||||
// ...
|
||||
```
|
191 vendor/github.com/gophercloud/gophercloud/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
|
||||
Copyright 2012-2013 Rackspace, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
|
||||
this file except in compliance with the License. You may obtain a copy of the
|
||||
License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed
|
||||
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
------
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
32 vendor/github.com/gophercloud/gophercloud/MIGRATING.md generated vendored Normal file
@@ -0,0 +1,32 @@
|
||||
# Compute
|
||||
|
||||
## Floating IPs
|
||||
|
||||
* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips`
|
||||
* `floatingips.Associate` and `floatingips.Disassociate` have been removed.
|
||||
* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP.
|
||||
|
||||
## Security Groups
|
||||
|
||||
* `secgroups.AddServerToGroup` is now `secgroups.AddServer`.
|
||||
* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`.
|
||||
|
||||
## Servers
|
||||
|
||||
* `servers.Reboot` now requires a `servers.RebootOpts` struct:
|
||||
|
||||
```golang
|
||||
rebootOpts := &servers.RebootOpts{
|
||||
Type: servers.SoftReboot,
|
||||
}
|
||||
res := servers.Reboot(client, server.ID, rebootOpts)
|
||||
```
|
||||
|
||||
# Identity
|
||||
|
||||
## V3
|
||||
|
||||
### Tokens
|
||||
|
||||
* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of
|
||||
`time.Time`
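
A small adjustment sketch (editor-added; `token` is a hypothetical `tokens.Token` value):

```go
// JSONRFC3339Milli is defined on top of time.Time, so convert explicitly
// when a time.Time is needed.
expires := time.Time(token.ExpiresAt)
fmt.Println(expires.UTC())
```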
|
143 vendor/github.com/gophercloud/gophercloud/README.md generated vendored Normal file
@@ -0,0 +1,143 @@
|
||||
# Gophercloud: an OpenStack SDK for Go
|
||||
[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud)
|
||||
[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master)
|
||||
|
||||
Gophercloud is an OpenStack Go SDK.
|
||||
|
||||
## Useful links
|
||||
|
||||
* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud)
|
||||
* [Effective Go](https://golang.org/doc/effective_go.html)
|
||||
|
||||
## How to install
|
||||
|
||||
Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH)
|
||||
is pointing to an appropriate directory where you want to install Gophercloud:
|
||||
|
||||
```bash
|
||||
mkdir $HOME/go
|
||||
export GOPATH=$HOME/go
|
||||
```
|
||||
|
||||
To protect yourself against changes in your dependencies, we highly recommend choosing a
|
||||
[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for
|
||||
your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install
|
||||
Gophercloud as a dependency like so:
|
||||
|
||||
```bash
|
||||
go get github.com/gophercloud/gophercloud
|
||||
|
||||
# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud"
|
||||
|
||||
godep save ./...
|
||||
```
|
||||
|
||||
This will install all the source files you need into a `Godeps/_workspace` directory, which is
|
||||
referenceable from your own source files when you use the `godep go` command.
|
||||
|
||||
## Getting started
|
||||
|
||||
### Credentials
|
||||
|
||||
Because you'll be hitting an API, you will need to retrieve your OpenStack
|
||||
credentials and either store them as environment variables or in your local Go
|
||||
files. The first method is recommended because it decouples credential
|
||||
information from source code, allowing you to push the latter to your version
|
||||
control system without any security risk.
|
||||
|
||||
You will need to retrieve the following:
|
||||
|
||||
* username
|
||||
* password
|
||||
* a valid Keystone identity URL
|
||||
|
||||
For users that have the OpenStack dashboard installed, there's a shortcut. If
|
||||
you visit the `project/access_and_security` path in Horizon and click on the
|
||||
"Download OpenStack RC File" button at the top right hand corner, you will
|
||||
download a bash file that exports all of your access details to environment
|
||||
variables. To execute the file, run `source admin-openrc.sh` and you will be
|
||||
prompted for your password.
|
||||
|
||||
### Authentication
|
||||
|
||||
Once you have access to your credentials, you can begin plugging them into
|
||||
Gophercloud. The next step is authentication, and this is handled by a base
|
||||
"Provider" struct. To get one, you can either pass in your credentials
|
||||
explicitly, or tell Gophercloud to use environment variables:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/utils"
|
||||
)
|
||||
|
||||
// Option 1: Pass in the values yourself
|
||||
opts := gophercloud.AuthOptions{
|
||||
IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
|
||||
Username: "{username}",
|
||||
Password: "{password}",
|
||||
}
|
||||
|
||||
// Option 2: Use a utility function to retrieve all your environment variables
|
||||
opts, err := openstack.AuthOptionsFromEnv()
|
||||
```
|
||||
|
||||
Once you have the `opts` variable, you can pass it in and get back a
|
||||
`ProviderClient` struct:
|
||||
|
||||
```go
|
||||
provider, err := openstack.AuthenticatedClient(opts)
|
||||
```
|
||||
|
||||
The `ProviderClient` is the top-level client that all of your OpenStack services
|
||||
derive from. The provider contains all of the authentication details that allow
|
||||
your Go code to access the API - such as the base URL and token ID.
|
||||
|
||||
### Provision a server
|
||||
|
||||
Once we have a base Provider, we inject it as a dependency into each OpenStack
|
||||
service. In order to work with the Compute API, we need a Compute service
|
||||
client, which can be created like so:
|
||||
|
||||
```go
|
||||
client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
|
||||
Region: os.Getenv("OS_REGION_NAME"),
|
||||
})
|
||||
```
|
||||
|
||||
We then use this `client` for any Compute API operation we want. In our case,
|
||||
we want to provision a new server - so we invoke the `Create` method and pass
|
||||
in the flavor ID (hardware specification) and image ID (operating system) we're
|
||||
interested in:
|
||||
|
||||
```go
|
||||
import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
|
||||
|
||||
server, err := servers.Create(client, servers.CreateOpts{
|
||||
Name: "My new server!",
|
||||
FlavorRef: "flavor_id",
|
||||
ImageRef: "image_id",
|
||||
}).Extract()
|
||||
```
|
||||
|
||||
The above code sample creates a new server with the given parameters and stores the
|
||||
new resource in the `server` variable (a
|
||||
[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct).
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works.
|
||||
|
||||
## Backwards-Compatibility Guarantees
|
||||
|
||||
None. Vendor it and write tests covering the parts you use.
|
||||
|
||||
## Contributing
|
||||
|
||||
See the [contributing guide](./.github/CONTRIBUTING.md).
|
||||
|
||||
## Help and feedback
|
||||
|
||||
If you're struggling with something or have spotted a potential bug, feel free
|
||||
to submit an issue to our [bug tracker](/issues).
|
74 vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md generated vendored Normal file
@@ -0,0 +1,74 @@
|
||||
|
||||
## On Pull Requests
|
||||
|
||||
- Before you start a PR there needs to be a Github issue and a discussion about it
|
||||
on that issue with a core contributor, even if it's just a 'SGTM'.
|
||||
|
||||
- A PR's description must reference the issue it closes with a `For <ISSUE NUMBER>` (e.g. For #293).
|
||||
|
||||
- A PR's description must contain link(s) to the line(s) in the OpenStack
|
||||
source code (on Github) that prove(s) the PR code to be valid. Links to documentation
|
||||
are not good enough. The link(s) should be to a non-`master` branch. For example,
|
||||
a pull request implementing the creation of a Neutron v2 subnet might put the
|
||||
following link in the description:
|
||||
|
||||
https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749
|
||||
|
||||
From that link, a reviewer (or user) can verify the fields in the request/response
|
||||
objects in the PR.
|
||||
|
||||
- A PR that is in-progress should have `[wip]` in front of the PR's title. When
|
||||
ready for review, remove the `[wip]` and ping a core contributor with an `@`.
|
||||
|
||||
- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with
|
||||
one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM]
|
||||
prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the
|
||||
[Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will
|
||||
let reviewers know it is ready to review.
|
||||
|
||||
- A PR should be small. Even if you intend on implementing an entire
|
||||
service, a PR should only be one route of that service
|
||||
(e.g. create server or get server, but not both).
|
||||
|
||||
- Unless explicitly asked, do not squash commits in the middle of a review; only
|
||||
append. It makes it difficult for the reviewer to see what's changed from one
|
||||
review to the next.
|
||||
|
||||
## On Code
|
||||
|
||||
- Regarding design: follow the code already in the library as closely as is reasonable.
|
||||
Most operations (e.g. create, delete) admit the same design.
|
||||
|
||||
- Unit tests and acceptance (integration) tests must be written to cover each PR.
|
||||
Tests for operations with several options (e.g. list, create) should include all
|
||||
the options in the tests. This will allow users to verify an operation on their
|
||||
own infrastructure and see an example of usage.
|
||||
|
||||
- If in doubt, ask in-line on the PR.
|
||||
|
||||
### File Structure
|
||||
|
||||
- The following should be used in most cases:
|
||||
|
||||
- `requests.go`: contains all the functions that make HTTP requests and the
|
||||
types associated with the HTTP request (parameters for URL, body, etc)
|
||||
- `results.go`: contains all the response objects and their methods
|
||||
- `urls.go`: contains the endpoints to which the requests are made
|
||||
|
||||
### Naming
|
||||
|
||||
- For methods on a type in `results.go`, the receiver should be named `r` and the
|
||||
variable into which it will be unmarshalled `s`.
|
||||
|
||||
- Functions in `requests.go`, with the exception of functions that return a
|
||||
`pagination.Pager`, should be named returns of the name `r`.
|
||||
|
||||
- Functions in `requests.go` that accept request bodies should accept as their
|
||||
last parameter an `interface` named `<Action>OptsBuilder` (eg `CreateOptsBuilder`).
|
||||
This `interface` should have at the least a method named `To<Resource><Action>Map`
|
||||
(eg `ToPortCreateMap`).
|
||||
|
||||
- Functions in `requests.go` that accept query strings should accept as their
|
||||
last parameter an `interface` named `<Action>OptsBuilder` (eg `ListOptsBuilder`).
|
||||
This `interface` should have at the least a method named `To<Resource><Action>Query`
|
||||
(eg `ToServerListQuery`).
|
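For illustration only (not part of the upstream style guide), a minimal sketch of the `requests.go` builder pattern described above, using a hypothetical `widgets` resource:

```go
// Hypothetical package following the conventions above; not part of Gophercloud.
package widgets

import "github.com/gophercloud/gophercloud"

// CreateOptsBuilder is the interface a Create request body builder must satisfy.
type CreateOptsBuilder interface {
	ToWidgetCreateMap() (map[string]interface{}, error)
}

// CreateOpts holds the parameters used to create a widget.
type CreateOpts struct {
	Name string `json:"name" required:"true"`
}

// ToWidgetCreateMap builds the request body from CreateOpts.
func (opts CreateOpts) ToWidgetCreateMap() (map[string]interface{}, error) {
	return gophercloud.BuildRequestBody(opts, "widget")
}

// CreateResult is the result of a Create request (would normally live in results.go).
type CreateResult struct {
	gophercloud.ErrResult
}

// createURL would normally live in urls.go.
func createURL(c *gophercloud.ServiceClient) string {
	return c.ServiceURL("widgets")
}

// Create makes the HTTP request; note the named return value r.
func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
	b, err := opts.ToWidgetCreateMap()
	if err != nil {
		r.Err = err
		return
	}
	_, r.Err = c.Post(createURL(c), b, &r.Body, nil)
	return
}
```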
327
vendor/github.com/gophercloud/gophercloud/auth_options.go
generated
vendored
Normal file
@ -0,0 +1,327 @@
|
||||
package gophercloud
|
||||
|
||||
/*
|
||||
AuthOptions stores information needed to authenticate to an OpenStack Cloud.
|
||||
You can populate one manually, or use a provider's AuthOptionsFromEnv() function
|
||||
to read relevant information from the standard environment variables. Pass one
|
||||
to a provider's AuthenticatedClient function to authenticate and obtain a
|
||||
ProviderClient representing an active session on that provider.
|
||||
|
||||
Its fields are the union of those recognized by each identity implementation and
|
||||
provider.
|
||||
*/
|
||||
type AuthOptions struct {
|
||||
// IdentityEndpoint specifies the HTTP endpoint that is required to work with
|
||||
// the Identity API of the appropriate version. While it's ultimately needed by
|
||||
// all of the identity services, it will often be populated by a provider-level
|
||||
// function.
|
||||
IdentityEndpoint string `json:"-"`
|
||||
|
||||
// Username is required if using Identity V2 API. Consult with your provider's
|
||||
// control panel to discover your account's username. In Identity V3, either
|
||||
// UserID or a combination of Username and DomainID or DomainName are needed.
|
||||
Username string `json:"username,omitempty"`
|
||||
UserID string `json:"-"`
|
||||
|
||||
Password string `json:"password,omitempty"`
|
||||
|
||||
// At most one of DomainID and DomainName must be provided if using Username
|
||||
// with Identity V3. Otherwise, both are optional.
|
||||
DomainID string `json:"-"`
|
||||
DomainName string `json:"name,omitempty"`
|
||||
|
||||
// The TenantID and TenantName fields are optional for the Identity V2 API.
|
||||
// The same fields are known as project_id and project_name in the Identity
|
||||
// V3 API, but are collected as TenantID and TenantName here in both cases.
|
||||
// Some providers allow you to specify a TenantName instead of the TenantID.
|
||||
// Some require both. Your provider's authentication policies will determine
|
||||
// how these fields influence authentication.
|
||||
// If DomainID or DomainName are provided, they will also apply to TenantName.
|
||||
// It is not currently possible to authenticate with Username and a Domain
|
||||
// and scope to a Project in a different Domain by using TenantName. To
|
||||
// accomplish that, the ProjectID will need to be provided to the TenantID
|
||||
// option.
|
||||
TenantID string `json:"tenantId,omitempty"`
|
||||
TenantName string `json:"tenantName,omitempty"`
|
||||
|
||||
// AllowReauth should be set to true if you grant permission for Gophercloud to
|
||||
// cache your credentials in memory, and to allow Gophercloud to attempt to
|
||||
// re-authenticate automatically if/when your token expires. If you set it to
|
||||
// false, it will not cache these settings, but re-authentication will not be
|
||||
// possible. This setting defaults to false.
|
||||
//
|
||||
// NOTE: The reauth function will try to re-authenticate endlessly if left unchecked.
|
||||
// The way to limit the number of attempts is to provide a custom HTTP client to the provider client
|
||||
// and provide a transport that implements the RoundTripper interface and stores the number of failed retries.
|
||||
// For an example of this, see here: https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311
|
||||
AllowReauth bool `json:"-"`
|
||||
|
||||
// TokenID allows users to authenticate (possibly as another user) with an
|
||||
// authentication token ID.
|
||||
TokenID string `json:"-"`
|
||||
}
|
||||
|
||||
// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder
|
||||
// interface in the v2 tokens package
|
||||
func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) {
|
||||
// Populate the request map.
|
||||
authMap := make(map[string]interface{})
|
||||
|
||||
if opts.Username != "" {
|
||||
if opts.Password != "" {
|
||||
authMap["passwordCredentials"] = map[string]interface{}{
|
||||
"username": opts.Username,
|
||||
"password": opts.Password,
|
||||
}
|
||||
} else {
|
||||
return nil, ErrMissingInput{Argument: "Password"}
|
||||
}
|
||||
} else if opts.TokenID != "" {
|
||||
authMap["token"] = map[string]interface{}{
|
||||
"id": opts.TokenID,
|
||||
}
|
||||
} else {
|
||||
return nil, ErrMissingInput{Argument: "Username"}
|
||||
}
|
||||
|
||||
if opts.TenantID != "" {
|
||||
authMap["tenantId"] = opts.TenantID
|
||||
}
|
||||
if opts.TenantName != "" {
|
||||
authMap["tenantName"] = opts.TenantName
|
||||
}
|
||||
|
||||
return map[string]interface{}{"auth": authMap}, nil
|
||||
}
|
||||
|
||||
func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) {
|
||||
type domainReq struct {
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
type projectReq struct {
|
||||
Domain *domainReq `json:"domain,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
ID *string `json:"id,omitempty"`
|
||||
}
|
||||
|
||||
type userReq struct {
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Password string `json:"password"`
|
||||
Domain *domainReq `json:"domain,omitempty"`
|
||||
}
|
||||
|
||||
type passwordReq struct {
|
||||
User userReq `json:"user"`
|
||||
}
|
||||
|
||||
type tokenReq struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
type identityReq struct {
|
||||
Methods []string `json:"methods"`
|
||||
Password *passwordReq `json:"password,omitempty"`
|
||||
Token *tokenReq `json:"token,omitempty"`
|
||||
}
|
||||
|
||||
type authReq struct {
|
||||
Identity identityReq `json:"identity"`
|
||||
}
|
||||
|
||||
type request struct {
|
||||
Auth authReq `json:"auth"`
|
||||
}
|
||||
|
||||
// Populate the request structure based on the provided arguments. Create and return an error
|
||||
// if insufficient or incompatible information is present.
|
||||
var req request
|
||||
|
||||
if opts.Password == "" {
|
||||
if opts.TokenID != "" {
|
||||
// Because we aren't using password authentication, it's an error to also provide any of the user-based authentication
|
||||
// parameters.
|
||||
if opts.Username != "" {
|
||||
return nil, ErrUsernameWithToken{}
|
||||
}
|
||||
if opts.UserID != "" {
|
||||
return nil, ErrUserIDWithToken{}
|
||||
}
|
||||
if opts.DomainID != "" {
|
||||
return nil, ErrDomainIDWithToken{}
|
||||
}
|
||||
if opts.DomainName != "" {
|
||||
return nil, ErrDomainNameWithToken{}
|
||||
}
|
||||
|
||||
// Configure the request for Token authentication.
|
||||
req.Auth.Identity.Methods = []string{"token"}
|
||||
req.Auth.Identity.Token = &tokenReq{
|
||||
ID: opts.TokenID,
|
||||
}
|
||||
} else {
|
||||
// If no password or token ID are available, authentication can't continue.
|
||||
return nil, ErrMissingPassword{}
|
||||
}
|
||||
} else {
|
||||
// Password authentication.
|
||||
req.Auth.Identity.Methods = []string{"password"}
|
||||
|
||||
// At least one of Username and UserID must be specified.
|
||||
if opts.Username == "" && opts.UserID == "" {
|
||||
return nil, ErrUsernameOrUserID{}
|
||||
}
|
||||
|
||||
if opts.Username != "" {
|
||||
// If Username is provided, UserID may not be provided.
|
||||
if opts.UserID != "" {
|
||||
return nil, ErrUsernameOrUserID{}
|
||||
}
|
||||
|
||||
// Either DomainID or DomainName must also be specified.
|
||||
if opts.DomainID == "" && opts.DomainName == "" {
|
||||
return nil, ErrDomainIDOrDomainName{}
|
||||
}
|
||||
|
||||
if opts.DomainID != "" {
|
||||
if opts.DomainName != "" {
|
||||
return nil, ErrDomainIDOrDomainName{}
|
||||
}
|
||||
|
||||
// Configure the request for Username and Password authentication with a DomainID.
|
||||
req.Auth.Identity.Password = &passwordReq{
|
||||
User: userReq{
|
||||
Name: &opts.Username,
|
||||
Password: opts.Password,
|
||||
Domain: &domainReq{ID: &opts.DomainID},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if opts.DomainName != "" {
|
||||
// Configure the request for Username and Password authentication with a DomainName.
|
||||
req.Auth.Identity.Password = &passwordReq{
|
||||
User: userReq{
|
||||
Name: &opts.Username,
|
||||
Password: opts.Password,
|
||||
Domain: &domainReq{Name: &opts.DomainName},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if opts.UserID != "" {
|
||||
// If UserID is specified, neither DomainID nor DomainName may be.
|
||||
if opts.DomainID != "" {
|
||||
return nil, ErrDomainIDWithUserID{}
|
||||
}
|
||||
if opts.DomainName != "" {
|
||||
return nil, ErrDomainNameWithUserID{}
|
||||
}
|
||||
|
||||
// Configure the request for UserID and Password authentication.
|
||||
req.Auth.Identity.Password = &passwordReq{
|
||||
User: userReq{ID: &opts.UserID, Password: opts.Password},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b, err := BuildRequestBody(req, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(scope) != 0 {
|
||||
b["auth"].(map[string]interface{})["scope"] = scope
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
|
||||
|
||||
var scope struct {
|
||||
ProjectID string
|
||||
ProjectName string
|
||||
DomainID string
|
||||
DomainName string
|
||||
}
|
||||
|
||||
if opts.TenantID != "" {
|
||||
scope.ProjectID = opts.TenantID
|
||||
} else {
|
||||
if opts.TenantName != "" {
|
||||
scope.ProjectName = opts.TenantName
|
||||
scope.DomainID = opts.DomainID
|
||||
scope.DomainName = opts.DomainName
|
||||
}
|
||||
}
|
||||
|
||||
if scope.ProjectName != "" {
|
||||
// ProjectName provided: either DomainID or DomainName must also be supplied.
|
||||
// ProjectID may not be supplied.
|
||||
if scope.DomainID == "" && scope.DomainName == "" {
|
||||
return nil, ErrScopeDomainIDOrDomainName{}
|
||||
}
|
||||
if scope.ProjectID != "" {
|
||||
return nil, ErrScopeProjectIDOrProjectName{}
|
||||
}
|
||||
|
||||
if scope.DomainID != "" {
|
||||
// ProjectName + DomainID
|
||||
return map[string]interface{}{
|
||||
"project": map[string]interface{}{
|
||||
"name": &scope.ProjectName,
|
||||
"domain": map[string]interface{}{"id": &scope.DomainID},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
if scope.DomainName != "" {
|
||||
// ProjectName + DomainName
|
||||
return map[string]interface{}{
|
||||
"project": map[string]interface{}{
|
||||
"name": &scope.ProjectName,
|
||||
"domain": map[string]interface{}{"name": &scope.DomainName},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
} else if scope.ProjectID != "" {
|
||||
// ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
|
||||
if scope.DomainID != "" {
|
||||
return nil, ErrScopeProjectIDAlone{}
|
||||
}
|
||||
if scope.DomainName != "" {
|
||||
return nil, ErrScopeProjectIDAlone{}
|
||||
}
|
||||
|
||||
// ProjectID
|
||||
return map[string]interface{}{
|
||||
"project": map[string]interface{}{
|
||||
"id": &scope.ProjectID,
|
||||
},
|
||||
}, nil
|
||||
} else if scope.DomainID != "" {
|
||||
// DomainID provided. ProjectID, ProjectName, and DomainName may not be provided.
|
||||
if scope.DomainName != "" {
|
||||
return nil, ErrScopeDomainIDOrDomainName{}
|
||||
}
|
||||
|
||||
// DomainID
|
||||
return map[string]interface{}{
|
||||
"domain": map[string]interface{}{
|
||||
"id": &scope.DomainID,
|
||||
},
|
||||
}, nil
|
||||
} else if scope.DomainName != "" {
|
||||
return nil, ErrScopeDomainName{}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (opts AuthOptions) CanReauth() bool {
|
||||
return opts.AllowReauth
|
||||
}
|
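For illustration only (not from the vendored source), a hedged sketch of how `ToTokenV3ScopeMap` above resolves the Tenant fields into a v3 scope; the tenant and domain names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
)

func main() {
	opts := gophercloud.AuthOptions{
		TenantName: "demo",    // treated as the v3 project name
		DomainName: "Default", // required alongside TenantName
	}

	// TenantName + DomainName are translated into a project scope keyed by name.
	scope, err := opts.ToTokenV3ScopeMap()
	if err != nil {
		fmt.Println("scope error:", err)
		return
	}
	fmt.Println(scope["project"] != nil) // true: the token will be project-scoped
}
```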
69
vendor/github.com/gophercloud/gophercloud/doc.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
/*
|
||||
Package gophercloud provides a multi-vendor interface to OpenStack-compatible
|
||||
clouds. The library has a three-level hierarchy: providers, services, and
|
||||
resources.
|
||||
|
||||
Provider structs represent the service providers that offer and manage a
|
||||
collection of services. The IdentityEndpoint is typically referred to as
|
||||
"auth_url" in information provided by the cloud operator. Additionally,
|
||||
the cloud may refer to TenantID or TenantName as project_id and project_name.
|
||||
These are defined like so:
|
||||
|
||||
opts := gophercloud.AuthOptions{
|
||||
IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
|
||||
Username: "{username}",
|
||||
Password: "{password}",
|
||||
TenantID: "{tenant_id}",
|
||||
}
|
||||
|
||||
provider, err := openstack.AuthenticatedClient(opts)
|
||||
|
||||
Service structs are specific to a provider and handle all of the logic and
|
||||
operations for a particular OpenStack service. Examples of services include:
|
||||
Compute, Object Storage, Block Storage. In order to define one, you need to
|
||||
pass in the parent provider, like so:
|
||||
|
||||
opts := gophercloud.EndpointOpts{Region: "RegionOne"}
|
||||
|
||||
client := openstack.NewComputeV2(provider, opts)
|
||||
|
||||
Resource structs are the domain models that services make use of in order
|
||||
to work with and represent the state of API resources:
|
||||
|
||||
server, err := servers.Get(client, "{serverId}").Extract()
|
||||
|
||||
Intermediate Result structs are returned for API operations, which allow
|
||||
generic access to the HTTP headers, response body, and any errors associated
|
||||
with the network transaction. To turn a result into a usable resource struct,
|
||||
you must call the Extract method which is chained to the response, or an
|
||||
Extract function from an applicable extension:
|
||||
|
||||
result := servers.Get(client, "{serverId}")
|
||||
|
||||
// Attempt to extract the disk configuration from the OS-DCF disk config
|
||||
// extension:
|
||||
config, err := diskconfig.ExtractGet(result)
|
||||
|
||||
All requests that enumerate a collection return a Pager struct that is used to
|
||||
iterate through the results one page at a time. Use the EachPage method on that
|
||||
Pager to handle each successive Page in a closure, then use the appropriate
|
||||
extraction method from that request's package to interpret that Page as a slice
|
||||
of results:
|
||||
|
||||
err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) {
|
||||
s, err := servers.ExtractServers(page)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Handle the []servers.Server slice.
|
||||
|
||||
// Return "false" or an error to prematurely stop fetching new pages.
|
||||
return true, nil
|
||||
})
|
||||
|
||||
This top-level package contains utility functions and data types that are used
|
||||
throughout the provider and service packages. Of particular note for end users
|
||||
are the AuthOptions and EndpointOpts structs.
|
||||
*/
|
||||
package gophercloud
|
76
vendor/github.com/gophercloud/gophercloud/endpoint_search.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
package gophercloud
|
||||
|
||||
// Availability indicates to whom a specific service endpoint is accessible:
|
||||
// the internet at large, internal networks only, or only to administrators.
|
||||
// Different identity services use different terminology for these. Identity v2
|
||||
// lists them as different kinds of URLs within the service catalog ("adminURL",
|
||||
// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an
|
||||
// endpoint's response.
|
||||
type Availability string
|
||||
|
||||
const (
|
||||
// AvailabilityAdmin indicates that an endpoint is only available to
|
||||
// administrators.
|
||||
AvailabilityAdmin Availability = "admin"
|
||||
|
||||
// AvailabilityPublic indicates that an endpoint is available to everyone on
|
||||
// the internet.
|
||||
AvailabilityPublic Availability = "public"
|
||||
|
||||
// AvailabilityInternal indicates that an endpoint is only available within
|
||||
// the cluster's internal network.
|
||||
AvailabilityInternal Availability = "internal"
|
||||
)
|
||||
|
||||
// EndpointOpts specifies search criteria used by queries against an
|
||||
// OpenStack service catalog. The options must contain enough information to
|
||||
// unambiguously identify one, and only one, endpoint within the catalog.
|
||||
//
|
||||
// Usually, these are passed to service client factory functions in a provider
|
||||
// package, like "rackspace.NewComputeV2()".
|
||||
type EndpointOpts struct {
|
||||
// Type [required] is the service type for the client (e.g., "compute",
|
||||
// "object-store"). Generally, this will be supplied by the service client
|
||||
// function, but a user-given value will be honored if provided.
|
||||
Type string
|
||||
|
||||
// Name [optional] is the service name for the client (e.g., "nova") as it
|
||||
// appears in the service catalog. Services can have the same Type but a
|
||||
// different Name, which is why both Type and Name are sometimes needed.
|
||||
Name string
|
||||
|
||||
// Region [required] is the geographic region in which the endpoint resides,
|
||||
// generally specifying which datacenter should house your resources.
|
||||
// Required only for services that span multiple regions.
|
||||
Region string
|
||||
|
||||
// Availability [optional] is the visibility of the endpoint to be returned.
|
||||
// Valid types include the constants AvailabilityPublic, AvailabilityInternal,
|
||||
// or AvailabilityAdmin from this package.
|
||||
//
|
||||
// Availability is not required, and defaults to AvailabilityPublic. Not all
|
||||
// providers or services offer all Availability options.
|
||||
Availability Availability
|
||||
}
|
||||
|
||||
/*
|
||||
EndpointLocator is an internal function to be used by provider implementations.
|
||||
|
||||
It provides an implementation that locates a single endpoint from a service
|
||||
catalog for a specific ProviderClient based on user-provided EndpointOpts. The
|
||||
provider then uses it to discover related ServiceClients.
|
||||
*/
|
||||
type EndpointLocator func(EndpointOpts) (string, error)
|
||||
|
||||
// ApplyDefaults is an internal method to be used by provider implementations.
|
||||
//
|
||||
// It sets EndpointOpts fields if not already set, including a default type.
|
||||
// Currently, EndpointOpts.Availability defaults to the public endpoint.
|
||||
func (eo *EndpointOpts) ApplyDefaults(t string) {
|
||||
if eo.Type == "" {
|
||||
eo.Type = t
|
||||
}
|
||||
if eo.Availability == "" {
|
||||
eo.Availability = AvailabilityPublic
|
||||
}
|
||||
}
|
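As an illustrative aside (not part of the vendored file), a small sketch of how `ApplyDefaults` fills in an `EndpointOpts` value; the "compute" type is just an example default:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
)

func main() {
	// Only the region is set; Type and Availability are left empty.
	eo := gophercloud.EndpointOpts{Region: "RegionOne"}

	// ApplyDefaults fills Type with the caller-supplied default and
	// Availability with AvailabilityPublic when they are unset.
	eo.ApplyDefaults("compute")

	fmt.Println(eo.Type)         // "compute"
	fmt.Println(eo.Availability) // "public"
}
```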
408
vendor/github.com/gophercloud/gophercloud/errors.go
generated
vendored
Normal file
@ -0,0 +1,408 @@
|
||||
package gophercloud
|
||||
|
||||
import "fmt"
|
||||
|
||||
// BaseError is an error type that all other error types embed.
|
||||
type BaseError struct {
|
||||
DefaultErrString string
|
||||
Info string
|
||||
}
|
||||
|
||||
func (e BaseError) Error() string {
|
||||
e.DefaultErrString = "An error occurred while executing a Gophercloud request."
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
func (e BaseError) choseErrString() string {
|
||||
if e.Info != "" {
|
||||
return e.Info
|
||||
}
|
||||
return e.DefaultErrString
|
||||
}
|
||||
|
||||
// ErrMissingInput is the error when input is required in a particular
|
||||
// situation but not provided by the user
|
||||
type ErrMissingInput struct {
|
||||
BaseError
|
||||
Argument string
|
||||
}
|
||||
|
||||
func (e ErrMissingInput) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors.
|
||||
type ErrInvalidInput struct {
|
||||
ErrMissingInput
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
func (e ErrInvalidInput) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrUnexpectedResponseCode is returned by the Request method when a response code other than
|
||||
// those listed in OkCodes is encountered.
|
||||
type ErrUnexpectedResponseCode struct {
|
||||
BaseError
|
||||
URL string
|
||||
Method string
|
||||
Expected []int
|
||||
Actual int
|
||||
Body []byte
|
||||
}
|
||||
|
||||
func (e ErrUnexpectedResponseCode) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf(
|
||||
"Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s",
|
||||
e.Expected, e.Method, e.URL, e.Actual, e.Body,
|
||||
)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrDefault400 is the default error type returned on a 400 HTTP response code.
|
||||
type ErrDefault400 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault401 is the default error type returned on a 401 HTTP response code.
|
||||
type ErrDefault401 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault404 is the default error type returned on a 404 HTTP response code.
|
||||
type ErrDefault404 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault405 is the default error type returned on a 405 HTTP response code.
|
||||
type ErrDefault405 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault408 is the default error type returned on a 408 HTTP response code.
|
||||
type ErrDefault408 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault429 is the default error type returned on a 429 HTTP response code.
|
||||
type ErrDefault429 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault500 is the default error type returned on a 500 HTTP response code.
|
||||
type ErrDefault500 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
// ErrDefault503 is the default error type returned on a 503 HTTP response code.
|
||||
type ErrDefault503 struct {
|
||||
ErrUnexpectedResponseCode
|
||||
}
|
||||
|
||||
func (e ErrDefault400) Error() string {
|
||||
return "Invalid request due to incorrect syntax or missing required parameters."
|
||||
}
|
||||
func (e ErrDefault401) Error() string {
|
||||
return "Authentication failed"
|
||||
}
|
||||
func (e ErrDefault404) Error() string {
|
||||
return "Resource not found"
|
||||
}
|
||||
func (e ErrDefault405) Error() string {
|
||||
return "Method not allowed"
|
||||
}
|
||||
func (e ErrDefault408) Error() string {
|
||||
return "The server timed out waiting for the request"
|
||||
}
|
||||
func (e ErrDefault429) Error() string {
|
||||
return "Too many requests have been sent in a given amount of time. Pause" +
|
||||
" requests, wait up to one minute, and try again."
|
||||
}
|
||||
func (e ErrDefault500) Error() string {
|
||||
return "Internal Server Error"
|
||||
}
|
||||
func (e ErrDefault503) Error() string {
|
||||
return "The service is currently unable to handle the request due to a temporary" +
|
||||
" overloading or maintenance. This is a temporary condition. Try again later."
|
||||
}
|
||||
|
||||
// Err400er is the interface resource error types implement to override the error message
|
||||
// from a 400 error.
|
||||
type Err400er interface {
|
||||
Error400(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err401er is the interface resource error types implement to override the error message
|
||||
// from a 401 error.
|
||||
type Err401er interface {
|
||||
Error401(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err404er is the interface resource error types implement to override the error message
|
||||
// from a 404 error.
|
||||
type Err404er interface {
|
||||
Error404(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err405er is the interface resource error types implement to override the error message
|
||||
// from a 405 error.
|
||||
type Err405er interface {
|
||||
Error405(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err408er is the interface resource error types implement to override the error message
|
||||
// from a 408 error.
|
||||
type Err408er interface {
|
||||
Error408(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err429er is the interface resource error types implement to override the error message
|
||||
// from a 429 error.
|
||||
type Err429er interface {
|
||||
Error429(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err500er is the interface resource error types implement to override the error message
|
||||
// from a 500 error.
|
||||
type Err500er interface {
|
||||
Error500(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// Err503er is the interface resource error types implement to override the error message
|
||||
// from a 503 error.
|
||||
type Err503er interface {
|
||||
Error503(ErrUnexpectedResponseCode) error
|
||||
}
|
||||
|
||||
// ErrTimeOut is the error type returned when an operation times out.
|
||||
type ErrTimeOut struct {
|
||||
BaseError
|
||||
}
|
||||
|
||||
func (e ErrTimeOut) Error() string {
|
||||
e.DefaultErrString = "A time out occurred"
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrUnableToReauthenticate is the error type returned when reauthentication fails.
|
||||
type ErrUnableToReauthenticate struct {
|
||||
BaseError
|
||||
ErrOriginal error
|
||||
}
|
||||
|
||||
func (e ErrUnableToReauthenticate) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrErrorAfterReauthentication is the error type returned when reauthentication
|
||||
// succeeds, but an error occurs afterward (usually an HTTP error).
|
||||
type ErrErrorAfterReauthentication struct {
|
||||
BaseError
|
||||
ErrOriginal error
|
||||
}
|
||||
|
||||
func (e ErrErrorAfterReauthentication) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrServiceNotFound is returned when no service in a service catalog matches
|
||||
// the provided EndpointOpts. This is generally returned by provider service
|
||||
// factory methods like "NewComputeV2()" and can mean that a service is not
|
||||
// enabled for your account.
|
||||
type ErrServiceNotFound struct {
|
||||
BaseError
|
||||
}
|
||||
|
||||
func (e ErrServiceNotFound) Error() string {
|
||||
e.DefaultErrString = "No suitable service could be found in the service catalog."
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrEndpointNotFound is returned when no available endpoints match the
|
||||
// provided EndpointOpts. This is also generally returned by provider service
|
||||
// factory methods, and usually indicates that a region was specified
|
||||
// incorrectly.
|
||||
type ErrEndpointNotFound struct {
|
||||
BaseError
|
||||
}
|
||||
|
||||
func (e ErrEndpointNotFound) Error() string {
|
||||
e.DefaultErrString = "No suitable endpoint could be found in the service catalog."
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrResourceNotFound is the error when trying to retrieve a resource's
|
||||
// ID by name and the resource doesn't exist.
|
||||
type ErrResourceNotFound struct {
|
||||
BaseError
|
||||
Name string
|
||||
ResourceType string
|
||||
}
|
||||
|
||||
func (e ErrResourceNotFound) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrMultipleResourcesFound is the error when trying to retrieve a resource's
|
||||
// ID by name and multiple resources have the user-provided name.
|
||||
type ErrMultipleResourcesFound struct {
|
||||
BaseError
|
||||
Name string
|
||||
Count int
|
||||
ResourceType string
|
||||
}
|
||||
|
||||
func (e ErrMultipleResourcesFound) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
// ErrUnexpectedType is the error when an unexpected type is encountered
|
||||
type ErrUnexpectedType struct {
|
||||
BaseError
|
||||
Expected string
|
||||
Actual string
|
||||
}
|
||||
|
||||
func (e ErrUnexpectedType) Error() string {
|
||||
e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual)
|
||||
return e.choseErrString()
|
||||
}
|
||||
|
||||
func unacceptedAttributeErr(attribute string) string {
|
||||
return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute)
|
||||
}
|
||||
|
||||
func redundantWithTokenErr(attribute string) string {
|
||||
return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute)
|
||||
}
|
||||
|
||||
func redundantWithUserID(attribute string) string {
|
||||
return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute)
|
||||
}
|
||||
|
||||
// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used.
|
||||
type ErrAPIKeyProvided struct{ BaseError }
|
||||
|
||||
func (e ErrAPIKeyProvided) Error() string {
|
||||
return unacceptedAttributeErr("APIKey")
|
||||
}
|
||||
|
||||
// ErrTenantIDProvided indicates that a TenantID was provided but can't be used.
|
||||
type ErrTenantIDProvided struct{ BaseError }
|
||||
|
||||
func (e ErrTenantIDProvided) Error() string {
|
||||
return unacceptedAttributeErr("TenantID")
|
||||
}
|
||||
|
||||
// ErrTenantNameProvided indicates that a TenantName was provided but can't be used.
|
||||
type ErrTenantNameProvided struct{ BaseError }
|
||||
|
||||
func (e ErrTenantNameProvided) Error() string {
|
||||
return unacceptedAttributeErr("TenantName")
|
||||
}
|
||||
|
||||
// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead.
|
||||
type ErrUsernameWithToken struct{ BaseError }
|
||||
|
||||
func (e ErrUsernameWithToken) Error() string {
|
||||
return redundantWithTokenErr("Username")
|
||||
}
|
||||
|
||||
// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead.
|
||||
type ErrUserIDWithToken struct{ BaseError }
|
||||
|
||||
func (e ErrUserIDWithToken) Error() string {
|
||||
return redundantWithTokenErr("UserID")
|
||||
}
|
||||
|
||||
// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead.
|
||||
type ErrDomainIDWithToken struct{ BaseError }
|
||||
|
||||
func (e ErrDomainIDWithToken) Error() string {
|
||||
return redundantWithTokenErr("DomainID")
|
||||
}
|
||||
|
||||
// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.
|
||||
type ErrDomainNameWithToken struct{ BaseError }
|
||||
|
||||
func (e ErrDomainNameWithToken) Error() string {
|
||||
return redundantWithTokenErr("DomainName")
|
||||
}
|
||||
|
||||
// ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once.
|
||||
type ErrUsernameOrUserID struct{ BaseError }
|
||||
|
||||
func (e ErrUsernameOrUserID) Error() string {
|
||||
return "Exactly one of Username and UserID must be provided for password authentication"
|
||||
}
|
||||
|
||||
// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used.
|
||||
type ErrDomainIDWithUserID struct{ BaseError }
|
||||
|
||||
func (e ErrDomainIDWithUserID) Error() string {
|
||||
return redundantWithUserID("DomainID")
|
||||
}
|
||||
|
||||
// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used.
|
||||
type ErrDomainNameWithUserID struct{ BaseError }
|
||||
|
||||
func (e ErrDomainNameWithUserID) Error() string {
|
||||
return redundantWithUserID("DomainName")
|
||||
}
|
||||
|
||||
// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it.
|
||||
// It may also indicate that both a DomainID and a DomainName were provided at once.
|
||||
type ErrDomainIDOrDomainName struct{ BaseError }
|
||||
|
||||
func (e ErrDomainIDOrDomainName) Error() string {
|
||||
return "You must provide exactly one of DomainID or DomainName to authenticate by Username"
|
||||
}
|
||||
|
||||
// ErrMissingPassword indicates that no password was provided and no token is available.
|
||||
type ErrMissingPassword struct{ BaseError }
|
||||
|
||||
func (e ErrMissingPassword) Error() string {
|
||||
return "You must provide a password to authenticate"
|
||||
}
|
||||
|
||||
// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present.
|
||||
type ErrScopeDomainIDOrDomainName struct{ BaseError }
|
||||
|
||||
func (e ErrScopeDomainIDOrDomainName) Error() string {
|
||||
return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName"
|
||||
}
|
||||
|
||||
// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope.
|
||||
type ErrScopeProjectIDOrProjectName struct{ BaseError }
|
||||
|
||||
func (e ErrScopeProjectIDOrProjectName) Error() string {
|
||||
return "You must provide at most one of ProjectID or ProjectName in a Scope"
|
||||
}
|
||||
|
||||
// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope.
|
||||
type ErrScopeProjectIDAlone struct{ BaseError }
|
||||
|
||||
func (e ErrScopeProjectIDAlone) Error() string {
|
||||
return "ProjectID must be supplied alone in a Scope"
|
||||
}
|
||||
|
||||
// ErrScopeDomainName indicates that a DomainName was provided alone in a Scope.
|
||||
type ErrScopeDomainName struct{ BaseError }
|
||||
|
||||
func (e ErrScopeDomainName) Error() string {
|
||||
return "DomainName must be supplied with a ProjectName or ProjectID in a Scope"
|
||||
}
|
||||
|
||||
// ErrScopeEmpty indicates that no credentials were provided in a Scope.
|
||||
type ErrScopeEmpty struct{ BaseError }
|
||||
|
||||
func (e ErrScopeEmpty) Error() string {
|
||||
return "You must provide either a Project or Domain in a Scope"
|
||||
}
|
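For illustration only (not from the vendored source), a hedged sketch of how calling code might branch on these default HTTP error types; the `err` value below is fabricated, where real code would receive it from a Gophercloud request:

```go
package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
)

// describe maps a few of the default HTTP error types to short labels.
func describe(err error) string {
	switch err.(type) {
	case gophercloud.ErrDefault401:
		return "authentication problem"
	case gophercloud.ErrDefault404:
		return "resource does not exist"
	case gophercloud.ErrDefault500, gophercloud.ErrDefault503:
		return "server-side failure, retry later"
	default:
		return fmt.Sprintf("other error: %v", err)
	}
}

func main() {
	err := gophercloud.ErrDefault404{}
	fmt.Println(describe(err)) // "resource does not exist"
}
```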
52
vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
)
|
||||
|
||||
var nilOptions = gophercloud.AuthOptions{}
|
||||
|
||||
// AuthOptionsFromEnv fills out a gophercloud.AuthOptions structure with the settings found on the various OpenStack
|
||||
// OS_* environment variables. The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME,
|
||||
// OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must
|
||||
// have settings, or an error will result. OS_TENANT_ID and OS_TENANT_NAME are optional.
|
||||
func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
|
||||
authURL := os.Getenv("OS_AUTH_URL")
|
||||
username := os.Getenv("OS_USERNAME")
|
||||
userID := os.Getenv("OS_USERID")
|
||||
password := os.Getenv("OS_PASSWORD")
|
||||
tenantID := os.Getenv("OS_TENANT_ID")
|
||||
tenantName := os.Getenv("OS_TENANT_NAME")
|
||||
domainID := os.Getenv("OS_DOMAIN_ID")
|
||||
domainName := os.Getenv("OS_DOMAIN_NAME")
|
||||
|
||||
if authURL == "" {
|
||||
err := gophercloud.ErrMissingInput{Argument: "authURL"}
|
||||
return nilOptions, err
|
||||
}
|
||||
|
||||
if username == "" && userID == "" {
|
||||
err := gophercloud.ErrMissingInput{Argument: "username"}
|
||||
return nilOptions, err
|
||||
}
|
||||
|
||||
if password == "" {
|
||||
err := gophercloud.ErrMissingInput{Argument: "password"}
|
||||
return nilOptions, err
|
||||
}
|
||||
|
||||
ao := gophercloud.AuthOptions{
|
||||
IdentityEndpoint: authURL,
|
||||
UserID: userID,
|
||||
Username: username,
|
||||
Password: password,
|
||||
TenantID: tenantID,
|
||||
TenantName: tenantName,
|
||||
DomainID: domainID,
|
||||
DomainName: domainName,
|
||||
}
|
||||
|
||||
return ao, nil
|
||||
}
|
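As an aside (not part of the vendored file), a minimal sketch of the intended flow: read credentials from the OS_* variables, then authenticate. It assumes the usual OpenStack environment variables are exported in the shell.

```go
package main

import (
	"log"

	"github.com/gophercloud/gophercloud/openstack"
)

func main() {
	// Reads OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, etc.
	opts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		log.Fatalf("incomplete environment: %v", err)
	}

	// Negotiates the identity version and returns an authenticated ProviderClient.
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		log.Fatalf("authentication failed: %v", err)
	}
	log.Printf("token acquired: %t", provider.TokenID != "")
}
```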
295
vendor/github.com/gophercloud/gophercloud/openstack/client.go
generated
vendored
Normal file
@ -0,0 +1,295 @@
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
|
||||
tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
|
||||
"github.com/gophercloud/gophercloud/openstack/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
v20 = "v2.0"
|
||||
v30 = "v3.0"
|
||||
)
|
||||
|
||||
// NewClient prepares an unauthenticated ProviderClient instance.
|
||||
// Most users will probably prefer using the AuthenticatedClient function instead.
|
||||
// This is useful if you wish to explicitly control the version of the identity service used for authentication,
|
||||
// for example.
|
||||
func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hadPath := u.Path != ""
|
||||
u.Path, u.RawQuery, u.Fragment = "", "", ""
|
||||
base := u.String()
|
||||
|
||||
endpoint = gophercloud.NormalizeURL(endpoint)
|
||||
base = gophercloud.NormalizeURL(base)
|
||||
|
||||
if hadPath {
|
||||
return &gophercloud.ProviderClient{
|
||||
IdentityBase: base,
|
||||
IdentityEndpoint: endpoint,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &gophercloud.ProviderClient{
|
||||
IdentityBase: base,
|
||||
IdentityEndpoint: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint specified by options, acquires a token, and
|
||||
// returns a Client instance that's ready to operate.
|
||||
// It first queries the root identity endpoint to determine which versions of the identity service are supported, then chooses
|
||||
// the most recent identity service available to proceed.
|
||||
func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
|
||||
client, err := NewClient(options.IdentityEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = Authenticate(client, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Authenticate or re-authenticate against the most recent identity service supported at the provided endpoint.
|
||||
func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
|
||||
versions := []*utils.Version{
|
||||
{ID: v20, Priority: 20, Suffix: "/v2.0/"},
|
||||
{ID: v30, Priority: 30, Suffix: "/v3/"},
|
||||
}
|
||||
|
||||
chosen, endpoint, err := utils.ChooseVersion(client, versions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch chosen.ID {
|
||||
case v20:
|
||||
return v2auth(client, endpoint, options, gophercloud.EndpointOpts{})
|
||||
case v30:
|
||||
return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{})
|
||||
default:
|
||||
// The switch statement must be out of date from the versions list.
|
||||
return fmt.Errorf("Unrecognized identity version: %s", chosen.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// AuthenticateV2 explicitly authenticates against the identity v2 endpoint.
|
||||
func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error {
|
||||
return v2auth(client, "", options, eo)
|
||||
}
|
||||
|
||||
func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error {
|
||||
v2Client, err := NewIdentityV2(client, eo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if endpoint != "" {
|
||||
v2Client.Endpoint = endpoint
|
||||
}
|
||||
|
||||
v2Opts := tokens2.AuthOptions{
|
||||
IdentityEndpoint: options.IdentityEndpoint,
|
||||
Username: options.Username,
|
||||
Password: options.Password,
|
||||
TenantID: options.TenantID,
|
||||
TenantName: options.TenantName,
|
||||
AllowReauth: options.AllowReauth,
|
||||
TokenID: options.TokenID,
|
||||
}
|
||||
|
||||
result := tokens2.Create(v2Client, v2Opts)
|
||||
|
||||
token, err := result.ExtractToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
catalog, err := result.ExtractServiceCatalog()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if options.AllowReauth {
|
||||
client.ReauthFunc = func() error {
|
||||
client.TokenID = ""
|
||||
return v2auth(client, endpoint, options, eo)
|
||||
}
|
||||
}
|
||||
client.TokenID = token.ID
|
||||
client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
|
||||
return V2EndpointURL(catalog, opts)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AuthenticateV3 explicitly authenticates against the identity v3 service.
|
||||
func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error {
|
||||
return v3auth(client, "", options, eo)
|
||||
}
|
||||
|
||||
func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error {
|
||||
// Override the generated service endpoint with the one returned by the version endpoint.
|
||||
v3Client, err := NewIdentityV3(client, eo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if endpoint != "" {
|
||||
v3Client.Endpoint = endpoint
|
||||
}
|
||||
|
||||
result := tokens3.Create(v3Client, opts)
|
||||
|
||||
token, err := result.ExtractToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
catalog, err := result.ExtractServiceCatalog()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client.TokenID = token.ID
|
||||
|
||||
if opts.CanReauth() {
|
||||
client.ReauthFunc = func() error {
|
||||
client.TokenID = ""
|
||||
return v3auth(client, endpoint, opts, eo)
|
||||
}
|
||||
}
|
||||
client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
|
||||
return V3EndpointURL(catalog, opts)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewIdentityV2 creates a ServiceClient that may be used to interact with the v2 identity service.
|
||||
func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
endpoint := client.IdentityBase + "v2.0/"
|
||||
clientType := "identity"
|
||||
var err error
|
||||
if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) {
|
||||
eo.ApplyDefaults(clientType)
|
||||
endpoint, err = client.EndpointLocator(eo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &gophercloud.ServiceClient{
|
||||
ProviderClient: client,
|
||||
Endpoint: endpoint,
|
||||
Type: clientType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewIdentityV3 creates a ServiceClient that may be used to access the v3 identity service.
|
||||
func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
endpoint := client.IdentityBase + "v3/"
|
||||
clientType := "identity"
|
||||
var err error
|
||||
if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) {
|
||||
eo.ApplyDefaults(clientType)
|
||||
endpoint, err = client.EndpointLocator(eo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &gophercloud.ServiceClient{
|
||||
ProviderClient: client,
|
||||
Endpoint: endpoint,
|
||||
Type: clientType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) {
|
||||
sc := new(gophercloud.ServiceClient)
|
||||
eo.ApplyDefaults(clientType)
|
||||
url, err := client.EndpointLocator(eo)
|
||||
if err != nil {
|
||||
return sc, err
|
||||
}
|
||||
sc.ProviderClient = client
|
||||
sc.Endpoint = url
|
||||
sc.Type = clientType
|
||||
return sc, nil
|
||||
}
|
||||
|
||||
// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 object storage package.
|
||||
func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "object-store")
|
||||
}
|
||||
|
||||
// NewComputeV2 creates a ServiceClient that may be used with the v2 compute package.
|
||||
func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "compute")
|
||||
}
|
||||
|
||||
// NewNetworkV2 creates a ServiceClient that may be used with the v2 network package.
|
||||
func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
sc, err := initClientOpts(client, eo, "network")
|
||||
sc.ResourceBase = sc.Endpoint + "v2.0/"
|
||||
return sc, err
|
||||
}
|
||||
|
||||
// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 block storage service.
|
||||
func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "volume")
|
||||
}
|
||||
|
||||
// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 block storage service.
|
||||
func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "volumev2")
|
||||
}
|
||||
|
||||
// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service.
|
||||
func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "sharev2")
|
||||
}
|
||||
|
||||
// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1
|
||||
// CDN service.
|
||||
func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "cdn")
|
||||
}
|
||||
|
||||
// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 orchestration service.
|
||||
func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "orchestration")
|
||||
}
|
||||
|
||||
// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service.
|
||||
func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
return initClientOpts(client, eo, "database")
|
||||
}
|
||||
|
||||
// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS service.
|
||||
func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
sc, err := initClientOpts(client, eo, "dns")
|
||||
sc.ResourceBase = sc.Endpoint + "v2/"
|
||||
return sc, err
|
||||
}
|
||||
|
||||
// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 image service.
|
||||
func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
|
||||
sc, err := initClientOpts(client, eo, "image")
|
||||
sc.ResourceBase = sc.Endpoint + "v2/"
|
||||
return sc, err
|
||||
}
|
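For illustration only, a sketch of deriving service clients from an authenticated provider using the constructors above; `provider` is assumed to come from `AuthenticatedClient` and the region name is a placeholder:

```go
// Hypothetical helper, not part of the vendored package.
package example

import (
	"log"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
)

// NewClients builds a compute and a network client from one provider.
func NewClients(provider *gophercloud.ProviderClient) error {
	eo := gophercloud.EndpointOpts{Region: "RegionOne"}

	compute, err := openstack.NewComputeV2(provider, eo)
	if err != nil {
		return err
	}

	// NewNetworkV2 also appends "v2.0/" to the endpoint as its ResourceBase.
	network, err := openstack.NewNetworkV2(provider, eo)
	if err != nil {
		return err
	}

	log.Println("compute endpoint:", compute.Endpoint)
	log.Println("network base:", network.ResourceBase)
	return nil
}
```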
99
vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"github.com/gophercloud/gophercloud"
|
||||
tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
|
||||
tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
|
||||
)
|
||||
|
||||
// V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired
|
||||
// during v2 identity authentication. The specified EndpointOpts are used to identify a unique,
|
||||
// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided
|
||||
// criteria and when none do. The minimum that can be specified is a Type, but you will also often
|
||||
// need to specify a Name and/or a Region depending on what's available on your OpenStack
|
||||
// deployment.
|
||||
func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
|
||||
// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
|
||||
var endpoints = make([]tokens2.Endpoint, 0, 1)
|
||||
for _, entry := range catalog.Entries {
|
||||
if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
|
||||
for _, endpoint := range entry.Endpoints {
|
||||
if opts.Region == "" || endpoint.Region == opts.Region {
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Report an error if the options were ambiguous.
|
||||
if len(endpoints) > 1 {
|
||||
err := &ErrMultipleMatchingEndpointsV2{}
|
||||
err.Endpoints = endpoints
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Extract the appropriate URL from the matching Endpoint.
|
||||
for _, endpoint := range endpoints {
|
||||
switch opts.Availability {
|
||||
case gophercloud.AvailabilityPublic:
|
||||
return gophercloud.NormalizeURL(endpoint.PublicURL), nil
|
||||
case gophercloud.AvailabilityInternal:
|
||||
return gophercloud.NormalizeURL(endpoint.InternalURL), nil
|
||||
case gophercloud.AvailabilityAdmin:
|
||||
return gophercloud.NormalizeURL(endpoint.AdminURL), nil
|
||||
default:
|
||||
err := &ErrInvalidAvailabilityProvided{}
|
||||
err.Argument = "Availability"
|
||||
err.Value = opts.Availability
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Report an error if there were no matching endpoints.
|
||||
err := &gophercloud.ErrEndpointNotFound{}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// V3EndpointURL discovers the endpoint URL for a specific service from a Catalog acquired
|
||||
// during the v3 identity service. The specified EndpointOpts are used to identify a unique,
|
||||
// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided
|
||||
// criteria and when none do. The minimum that can be specified is a Type, but you will also often
|
||||
// need to specify a Name and/or a Region depending on what's available on your OpenStack
|
||||
// deployment.
|
||||
func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
|
||||
// Extract Endpoints from the catalog entries that match the requested Type, Interface,
|
||||
// Name if provided, and Region if provided.
|
||||
var endpoints = make([]tokens3.Endpoint, 0, 1)
|
||||
for _, entry := range catalog.Entries {
|
||||
if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
|
||||
for _, endpoint := range entry.Endpoints {
|
||||
if opts.Availability != gophercloud.AvailabilityAdmin &&
|
||||
opts.Availability != gophercloud.AvailabilityPublic &&
|
||||
opts.Availability != gophercloud.AvailabilityInternal {
|
||||
err := &ErrInvalidAvailabilityProvided{}
|
||||
err.Argument = "Availability"
|
||||
err.Value = opts.Availability
|
||||
return "", err
|
||||
}
|
||||
if (opts.Availability == gophercloud.Availability(endpoint.Interface)) &&
|
||||
(opts.Region == "" || endpoint.Region == opts.Region) {
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Report an error if the options were ambiguous.
|
||||
if len(endpoints) > 1 {
|
||||
return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints}
|
||||
}
|
||||
|
||||
// Extract the URL from the matching Endpoint.
|
||||
for _, endpoint := range endpoints {
|
||||
return gophercloud.NormalizeURL(endpoint.URL), nil
|
||||
}
|
||||
|
||||
// Report an error if there were no matching endpoints.
|
||||
err := &gophercloud.ErrEndpointNotFound{}
|
||||
return "", err
|
||||
}
|
71
vendor/github.com/gophercloud/gophercloud/openstack/errors.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
|
||||
tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
|
||||
)
|
||||
|
||||
// ErrEndpointNotFound is the error when no suitable endpoint can be found
|
||||
// in the user's catalog
|
||||
type ErrEndpointNotFound struct{ gophercloud.BaseError }
|
||||
|
||||
func (e ErrEndpointNotFound) Error() string {
|
||||
return "No suitable endpoint could be found in the service catalog."
|
||||
}
|
||||
|
||||
// ErrInvalidAvailabilityProvided is the error when an invalid endpoint
|
||||
// availability is provided
|
||||
type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput }
|
||||
|
||||
func (e ErrInvalidAvailabilityProvided) Error() string {
|
||||
return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value)
|
||||
}
|
||||
|
||||
// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint
|
||||
// for the given options is found in the v2 catalog
|
||||
type ErrMultipleMatchingEndpointsV2 struct {
|
||||
gophercloud.BaseError
|
||||
Endpoints []tokens2.Endpoint
|
||||
}
|
||||
|
||||
func (e ErrMultipleMatchingEndpointsV2) Error() string {
|
||||
return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints)
|
||||
}
|
||||
|
||||
// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint
|
||||
// for the given options is found in the v3 catalog
|
||||
type ErrMultipleMatchingEndpointsV3 struct {
|
||||
gophercloud.BaseError
|
||||
Endpoints []tokens3.Endpoint
|
||||
}
|
||||
|
||||
func (e ErrMultipleMatchingEndpointsV3) Error() string {
|
||||
return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints)
|
||||
}
|
||||
|
||||
// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not
|
||||
// found
|
||||
type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput }
|
||||
|
||||
func (e ErrNoAuthURL) Error() string {
|
||||
return "Environment variable OS_AUTH_URL needs to be set."
|
||||
}
|
||||
|
||||
// ErrNoUsername is the error when the OS_USERNAME environment variable is not
|
||||
// found
|
||||
type ErrNoUsername struct{ gophercloud.ErrInvalidInput }
|
||||
|
||||
func (e ErrNoUsername) Error() string {
|
||||
return "Environment variable OS_USERNAME needs to be set."
|
||||
}
|
||||
|
||||
// ErrNoPassword is the error when the OS_PASSWORD environment variable is not
|
||||
// found
|
||||
type ErrNoPassword struct{ gophercloud.ErrInvalidInput }
|
||||
|
||||
func (e ErrNoPassword) Error() string {
|
||||
return "Environment variable OS_PASSWORD needs to be set."
|
||||
}
|
7 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go generated vendored Normal file
@@ -0,0 +1,7 @@
// Package tenants provides information and interaction with the
// tenants API resource for the OpenStack Identity service.
//
// See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
// and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants
// for more information.
package tenants
107 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go generated vendored Normal file
@@ -0,0 +1,107 @@
package tenants

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/pagination"
)

// ListOpts filters the Tenants that are returned by the List call.
type ListOpts struct {
    // Marker is the ID of the last Tenant on the previous page.
    Marker string `q:"marker"`
    // Limit specifies the page size.
    Limit int `q:"limit"`
}

// List enumerates the Tenants to which the current token has access.
func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
    url := listURL(client)
    if opts != nil {
        q, err := gophercloud.BuildQueryString(opts)
        if err != nil {
            return pagination.Pager{Err: err}
        }
        url += q.String()
    }
    return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
        return TenantPage{pagination.LinkedPageBase{PageResult: r}}
    })
}

// CreateOpts represents the options needed when creating a new tenant.
type CreateOpts struct {
    // Name is the name of the tenant.
    Name string `json:"name,required"`
    // Description is the description of the tenant.
    Description string `json:"description,omitempty"`
    // Enabled sets the tenant status to enabled or disabled.
    Enabled *bool `json:"enabled,omitempty"`
}

// CreateOptsBuilder describes struct types that can be accepted by the Create call.
type CreateOptsBuilder interface {
    ToTenantCreateMap() (map[string]interface{}, error)
}

// ToTenantCreateMap assembles a request body based on the contents of a CreateOpts.
func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "tenant")
}

// Create is the operation responsible for creating a new tenant.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
    b, err := opts.ToTenantCreateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200, 201},
    })
    return
}

// Get requests details on a single tenant by ID.
func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
    _, r.Err = client.Get(getURL(client, id), &r.Body, nil)
    return
}

// UpdateOptsBuilder allows extensions to add additional attributes to the Update request.
type UpdateOptsBuilder interface {
    ToTenantUpdateMap() (map[string]interface{}, error)
}

// UpdateOpts specifies the base attributes that may be updated on an existing tenant.
type UpdateOpts struct {
    // Name is the name of the tenant.
    Name string `json:"name,omitempty"`
    // Description is the description of the tenant.
    Description string `json:"description,omitempty"`
    // Enabled sets the tenant status to enabled or disabled.
    Enabled *bool `json:"enabled,omitempty"`
}

// ToTenantUpdateMap formats an UpdateOpts structure into a request body.
func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "tenant")
}

// Update is the operation responsible for updating existing tenants by their UUID.
func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
    b, err := opts.ToTenantUpdateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200},
    })
    return
}

// Delete is the operation responsible for permanently deleting an API tenant.
func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
    _, r.Err = client.Delete(deleteURL(client, id), nil)
    return
}
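A usage sketch (not part of this change) showing how the List pager above is typically consumed: page through the tenants the token can see and extract each page with ExtractTenants. The identity parameter is assumed to be an Identity v2 ServiceClient built elsewhere.

    package example

    import (
        "fmt"

        "github.com/gophercloud/gophercloud"
        "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants"
        "github.com/gophercloud/gophercloud/pagination"
    )

    // listTenants walks every page returned by the v2 tenants API and prints
    // each tenant's name and ID.
    func listTenants(identity *gophercloud.ServiceClient) error {
        opts := &tenants.ListOpts{Limit: 20}
        return tenants.List(identity, opts).EachPage(func(page pagination.Page) (bool, error) {
            ts, err := tenants.ExtractTenants(page)
            if err != nil {
                return false, err
            }
            for _, t := range ts {
                fmt.Printf("%s (%s)\n", t.Name, t.ID)
            }
            return true, nil // keep paging
        })
    }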
86 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go generated vendored Normal file
@@ -0,0 +1,86 @@
package tenants

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/pagination"
)

// Tenant is a grouping of users in the identity service.
type Tenant struct {
    // ID is a unique identifier for this tenant.
    ID string `json:"id"`

    // Name is a friendlier user-facing name for this tenant.
    Name string `json:"name"`

    // Description is a human-readable explanation of this Tenant's purpose.
    Description string `json:"description"`

    // Enabled indicates whether or not a tenant is active.
    Enabled bool `json:"enabled"`
}

// TenantPage is a single page of Tenant results.
type TenantPage struct {
    pagination.LinkedPageBase
}

// IsEmpty determines whether or not a page of Tenants contains any results.
func (r TenantPage) IsEmpty() (bool, error) {
    tenants, err := ExtractTenants(r)
    return len(tenants) == 0, err
}

// NextPageURL extracts the "next" link from the tenants_links section of the result.
func (r TenantPage) NextPageURL() (string, error) {
    var s struct {
        Links []gophercloud.Link `json:"tenants_links"`
    }
    err := r.ExtractInto(&s)
    if err != nil {
        return "", err
    }
    return gophercloud.ExtractNextURL(s.Links)
}

// ExtractTenants returns a slice of Tenants contained in a single page of results.
func ExtractTenants(r pagination.Page) ([]Tenant, error) {
    var s struct {
        Tenants []Tenant `json:"tenants"`
    }
    err := (r.(TenantPage)).ExtractInto(&s)
    return s.Tenants, err
}

type tenantResult struct {
    gophercloud.Result
}

// Extract interprets any tenantResult as a Tenant.
func (r tenantResult) Extract() (*Tenant, error) {
    var s struct {
        Tenant *Tenant `json:"tenant"`
    }
    err := r.ExtractInto(&s)
    return s.Tenant, err
}

// GetResult temporarily contains the response from the Get call.
type GetResult struct {
    tenantResult
}

// CreateResult temporarily contains the response from the Create call.
type CreateResult struct {
    tenantResult
}

// DeleteResult temporarily contains the response from the Delete call.
type DeleteResult struct {
    tenantResult
}

// UpdateResult temporarily contains the response from the Update call.
type UpdateResult struct {
    tenantResult
}
23 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go generated vendored Normal file
@@ -0,0 +1,23 @@
package tenants

import "github.com/gophercloud/gophercloud"

func listURL(client *gophercloud.ServiceClient) string {
    return client.ServiceURL("v2.0/tenants")
}

func getURL(client *gophercloud.ServiceClient, tenantID string) string {
    return client.ServiceURL("v2.0/tenants", tenantID)
}

func createURL(client *gophercloud.ServiceClient) string {
    return client.ServiceURL("v2.0/tenants")
}

func deleteURL(client *gophercloud.ServiceClient, tenantID string) string {
    return client.ServiceURL("v2.0/tenants", tenantID)
}

func updateURL(client *gophercloud.ServiceClient, tenantID string) string {
    return client.ServiceURL("v2.0/tenants", tenantID)
}
5 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go generated vendored Normal file
@@ -0,0 +1,5 @@
// Package tokens provides information and interaction with the token API
// resource for the OpenStack Identity service.
// For more information, see:
// http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
package tokens
99 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go generated vendored Normal file
@@ -0,0 +1,99 @@
package tokens

import "github.com/gophercloud/gophercloud"

type PasswordCredentialsV2 struct {
    Username string `json:"username" required:"true"`
    Password string `json:"password" required:"true"`
}

type TokenCredentialsV2 struct {
    ID string `json:"id,omitempty" required:"true"`
}

// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsBuilder
// interface.
type AuthOptionsV2 struct {
    PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"`

    // The TenantID and TenantName fields are optional for the Identity V2 API.
    // Some providers allow you to specify a TenantName instead of the TenantId.
    // Some require both. Your provider's authentication policies will determine
    // how these fields influence authentication.
    TenantID   string `json:"tenantId,omitempty"`
    TenantName string `json:"tenantName,omitempty"`

    // TokenCredentials allows users to authenticate (possibly as another user) with an
    // authentication token ID.
    TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"`
}

// AuthOptionsBuilder describes any argument that may be passed to the Create call.
type AuthOptionsBuilder interface {
    // ToTokenV2CreateMap assembles the Create request body, returning an error if parameters are
    // missing or inconsistent.
    ToTokenV2CreateMap() (map[string]interface{}, error)
}

// AuthOptions are the valid options for Openstack Identity v2 authentication.
// For field descriptions, see gophercloud.AuthOptions.
type AuthOptions struct {
    IdentityEndpoint string `json:"-"`
    Username         string `json:"username,omitempty"`
    Password         string `json:"password,omitempty"`
    TenantID         string `json:"tenantId,omitempty"`
    TenantName       string `json:"tenantName,omitempty"`
    AllowReauth      bool   `json:"-"`
    TokenID          string
}

// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder
// interface in the v2 tokens package
func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) {
    v2Opts := AuthOptionsV2{
        TenantID:   opts.TenantID,
        TenantName: opts.TenantName,
    }

    if opts.Password != "" {
        v2Opts.PasswordCredentials = &PasswordCredentialsV2{
            Username: opts.Username,
            Password: opts.Password,
        }
    } else {
        v2Opts.TokenCredentials = &TokenCredentialsV2{
            ID: opts.TokenID,
        }
    }

    b, err := gophercloud.BuildRequestBody(v2Opts, "auth")
    if err != nil {
        return nil, err
    }
    return b, nil
}

// Create authenticates to the identity service and attempts to acquire a Token.
// If successful, the CreateResult contains the issued Token and the service catalog.
// Generally, rather than interact with this call directly, end users should call openstack.AuthenticatedClient(),
// which abstracts all of the gory details about navigating service catalogs and such.
func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) {
    b, err := auth.ToTokenV2CreateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{
        OkCodes:     []int{200, 203},
        MoreHeaders: map[string]string{"X-Auth-Token": ""},
    })
    return
}

// Get validates and retrieves information for user's token.
func Get(client *gophercloud.ServiceClient, token string) (r GetResult) {
    _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200, 203},
    })
    return
}
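A short sketch (not part of this change) of driving the v2 Create call directly, which most callers avoid in favor of openstack.AuthenticatedClient. The username, password, and tenant name are placeholders; identity is assumed to be an Identity v2 ServiceClient.

    package example

    import (
        "fmt"

        "github.com/gophercloud/gophercloud"
        tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
    )

    // issueV2Token requests a v2 token and reads both the token and the
    // service catalog out of the deferred result.
    func issueV2Token(identity *gophercloud.ServiceClient) error {
        opts := tokens2.AuthOptions{
            Username:   "demo",
            Password:   "secret",
            TenantName: "demo",
        }
        result := tokens2.Create(identity, opts)

        token, err := result.ExtractToken()
        if err != nil {
            return err
        }
        catalog, err := result.ExtractServiceCatalog()
        if err != nil {
            return err
        }
        fmt.Printf("token %s expires %s, %d catalog entries\n",
            token.ID, token.ExpiresAt, len(catalog.Entries))
        return nil
    }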
144 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go generated vendored Normal file
@@ -0,0 +1,144 @@
package tokens

import (
    "time"

    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants"
)

// Token provides only the most basic information related to an authentication token.
type Token struct {
    // ID provides the primary means of identifying a user to the OpenStack API.
    // OpenStack defines this field as an opaque value, so do not depend on its content.
    // It is safe, however, to compare for equality.
    ID string

    // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the authentication token becomes invalid.
    // After this point in time, future API requests made using this authentication token will respond with errors.
    // Either the caller will need to reauthenticate manually, or more preferably, the caller should exploit automatic re-authentication.
    // See the AuthOptions structure for more details.
    ExpiresAt time.Time

    // Tenant provides information about the tenant to which this token grants access.
    Tenant tenants.Tenant
}

// Role is a role for a user.
type Role struct {
    Name string `json:"name"`
}

// User is an OpenStack user.
type User struct {
    ID       string `json:"id"`
    Name     string `json:"name"`
    UserName string `json:"username"`
    Roles    []Role `json:"roles"`
}

// Endpoint represents a single API endpoint offered by a service.
// It provides the public and internal URLs, if supported, along with a region specifier, again if provided.
// The significance of the Region field will depend upon your provider.
//
// In addition, the interface offered by the service will have version information associated with it
// through the VersionId, VersionInfo, and VersionList fields, if provided or supported.
//
// In all cases, fields which aren't supported by the provider and service combined will assume a zero-value ("").
type Endpoint struct {
    TenantID    string `json:"tenantId"`
    PublicURL   string `json:"publicURL"`
    InternalURL string `json:"internalURL"`
    AdminURL    string `json:"adminURL"`
    Region      string `json:"region"`
    VersionID   string `json:"versionId"`
    VersionInfo string `json:"versionInfo"`
    VersionList string `json:"versionList"`
}

// CatalogEntry provides a type-safe interface to an Identity API V2 service catalog listing.
// Each class of service, such as cloud DNS or block storage services, will have a single
// CatalogEntry representing it.
//
// Note: when looking for the desired service, try, whenever possible, to key off the type field.
// Otherwise, you'll tie the representation of the service to a specific provider.
type CatalogEntry struct {
    // Name will contain the provider-specified name for the service.
    Name string `json:"name"`

    // Type will contain a type string if OpenStack defines a type for the service.
    // Otherwise, for provider-specific services, the provider may assign their own type strings.
    Type string `json:"type"`

    // Endpoints will let the caller iterate over all the different endpoints that may exist for
    // the service.
    Endpoints []Endpoint `json:"endpoints"`
}

// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
type ServiceCatalog struct {
    Entries []CatalogEntry
}

// CreateResult defers the interpretation of a created token.
// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog.
type CreateResult struct {
    gophercloud.Result
}

// GetResult is the deferred response from a Get call, which carries the same body as a Create response.
// Use ExtractUser() to interpret it as a User.
type GetResult struct {
    CreateResult
}

// ExtractToken returns the just-created Token from a CreateResult.
func (r CreateResult) ExtractToken() (*Token, error) {
    var s struct {
        Access struct {
            Token struct {
                Expires string         `json:"expires"`
                ID      string         `json:"id"`
                Tenant  tenants.Tenant `json:"tenant"`
            } `json:"token"`
        } `json:"access"`
    }

    err := r.ExtractInto(&s)
    if err != nil {
        return nil, err
    }

    expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires)
    if err != nil {
        return nil, err
    }

    return &Token{
        ID:        s.Access.Token.ID,
        ExpiresAt: expiresTs,
        Tenant:    s.Access.Token.Tenant,
    }, nil
}

// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token.
func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
    var s struct {
        Access struct {
            Entries []CatalogEntry `json:"serviceCatalog"`
        } `json:"access"`
    }
    err := r.ExtractInto(&s)
    return &ServiceCatalog{Entries: s.Access.Entries}, err
}

// ExtractUser returns the User from a GetResult.
func (r GetResult) ExtractUser() (*User, error) {
    var s struct {
        Access struct {
            User User `json:"user"`
        } `json:"access"`
    }
    err := r.ExtractInto(&s)
    return &s.Access.User, err
}
13 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go generated vendored Normal file
@@ -0,0 +1,13 @@
package tokens

import "github.com/gophercloud/gophercloud"

// CreateURL generates the URL used to create new Tokens.
func CreateURL(client *gophercloud.ServiceClient) string {
    return client.ServiceURL("tokens")
}

// GetURL generates the URL used to Validate Tokens.
func GetURL(client *gophercloud.ServiceClient, token string) string {
    return client.ServiceURL("tokens", token)
}
1 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/doc.go generated vendored Normal file
@@ -0,0 +1 @@
package users
108 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/requests.go generated vendored Normal file
@@ -0,0 +1,108 @@
package users

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/pagination"
)

// List lists the existing users.
func List(client *gophercloud.ServiceClient) pagination.Pager {
    return pagination.NewPager(client, rootURL(client), func(r pagination.PageResult) pagination.Page {
        return UserPage{pagination.SinglePageBase(r)}
    })
}

// CommonOpts are the parameters that are shared between CreateOpts and
// UpdateOpts
type CommonOpts struct {
    // Either a name or username is required. When provided, the value must be
    // unique or a 409 conflict error will be returned. If you provide a name but
    // omit a username, the latter will be set to the former; and vice versa.
    Name     string `json:"name,omitempty"`
    Username string `json:"username,omitempty"`
    // The ID of the tenant to which you want to assign this user.
    TenantID string `json:"tenantId,omitempty"`
    // Indicates whether this user is enabled or not.
    Enabled *bool `json:"enabled,omitempty"`
    // The email address of this user.
    Email string `json:"email,omitempty"`
    // Password is the password of the new user.
    Password string `json:"password,omitempty"`
}

// CreateOpts represents the options needed when creating new users.
type CreateOpts CommonOpts

// CreateOptsBuilder describes struct types that can be accepted by the Create call.
type CreateOptsBuilder interface {
    ToUserCreateMap() (map[string]interface{}, error)
}

// ToUserCreateMap assembles a request body based on the contents of a CreateOpts.
func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) {
    if opts.Name == "" && opts.Username == "" {
        err := gophercloud.ErrMissingInput{}
        err.Argument = "users.CreateOpts.Name/users.CreateOpts.Username"
        err.Info = "Either a Name or Username must be provided"
        return nil, err
    }
    return gophercloud.BuildRequestBody(opts, "user")
}

// Create is the operation responsible for creating new users.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
    b, err := opts.ToUserCreateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200, 201},
    })
    return
}

// Get requests details on a single user by ID.
func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
    _, r.Err = client.Get(ResourceURL(client, id), &r.Body, nil)
    return
}

// UpdateOptsBuilder allows extensions to add additional attributes to the Update request.
type UpdateOptsBuilder interface {
    ToUserUpdateMap() (map[string]interface{}, error)
}

// UpdateOpts specifies the base attributes that may be updated on an existing user.
type UpdateOpts CommonOpts

// ToUserUpdateMap formats an UpdateOpts structure into a request body.
func (opts UpdateOpts) ToUserUpdateMap() (map[string]interface{}, error) {
    return gophercloud.BuildRequestBody(opts, "user")
}

// Update is the operation responsible for updating existing users by their UUID.
func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
    b, err := opts.ToUserUpdateMap()
    if err != nil {
        r.Err = err
        return
    }
    _, r.Err = client.Put(ResourceURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{
        OkCodes: []int{200},
    })
    return
}

// Delete is the operation responsible for permanently deleting an API user.
func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
    _, r.Err = client.Delete(ResourceURL(client, id), nil)
    return
}

// ListRoles lists the existing roles that can be assigned to users.
func ListRoles(client *gophercloud.ServiceClient, tenantID, userID string) pagination.Pager {
    return pagination.NewPager(client, listRolesURL(client, tenantID, userID), func(r pagination.PageResult) pagination.Page {
        return RolePage{pagination.SinglePageBase(r)}
    })
}
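A brief sketch (not part of this change) of creating a user with the package above. The names, email, and password are placeholders, and identity is assumed to be an Identity v2 ServiceClient supplied by the caller.

    package example

    import (
        "github.com/gophercloud/gophercloud"
        "github.com/gophercloud/gophercloud/openstack/identity/v2/users"
    )

    // createUser creates an enabled user in the given tenant and returns the
    // interpreted User resource.
    func createUser(identity *gophercloud.ServiceClient, tenantID string) (*users.User, error) {
        enabled := true
        opts := users.CreateOpts{
            Name:     "stackube-demo",
            Email:    "demo@example.com",
            Enabled:  &enabled,
            TenantID: tenantID,
            Password: "secret",
        }
        return users.Create(identity, opts).Extract()
    }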
110 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/results.go generated vendored Normal file
@@ -0,0 +1,110 @@
package users

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/pagination"
)

// User represents a user resource that exists on the API.
type User struct {
    // The UUID for this user.
    ID string

    // The human name for this user.
    Name string

    // The username for this user.
    Username string

    // Indicates whether the user is enabled (true) or disabled (false).
    Enabled bool

    // The email address for this user.
    Email string

    // The ID of the tenant to which this user belongs.
    TenantID string `json:"tenant_id"`
}

// Role assigns specific responsibilities to users, allowing them to accomplish
// certain API operations whilst scoped to a service.
type Role struct {
    // UUID of the role
    ID string

    // Name of the role
    Name string
}

// UserPage is a single page of a User collection.
type UserPage struct {
    pagination.SinglePageBase
}

// RolePage is a single page of a user Role collection.
type RolePage struct {
    pagination.SinglePageBase
}

// IsEmpty determines whether or not a page of Users contains any results.
func (r UserPage) IsEmpty() (bool, error) {
    users, err := ExtractUsers(r)
    return len(users) == 0, err
}

// ExtractUsers returns a slice of Users contained in a single page of results.
func ExtractUsers(r pagination.Page) ([]User, error) {
    var s struct {
        Users []User `json:"users"`
    }
    err := (r.(UserPage)).ExtractInto(&s)
    return s.Users, err
}

// IsEmpty determines whether or not a page of Roles contains any results.
func (r RolePage) IsEmpty() (bool, error) {
    roles, err := ExtractRoles(r)
    return len(roles) == 0, err
}

// ExtractRoles returns a slice of Roles contained in a single page of results.
func ExtractRoles(r pagination.Page) ([]Role, error) {
    var s struct {
        Roles []Role `json:"roles"`
    }
    err := (r.(RolePage)).ExtractInto(&s)
    return s.Roles, err
}

type commonResult struct {
    gophercloud.Result
}

// Extract interprets any commonResult as a User, if possible.
func (r commonResult) Extract() (*User, error) {
    var s struct {
        User *User `json:"user"`
    }
    err := r.ExtractInto(&s)
    return s.User, err
}

// CreateResult represents the result of a Create operation
type CreateResult struct {
    commonResult
}

// GetResult represents the result of a Get operation
type GetResult struct {
    commonResult
}

// UpdateResult represents the result of an Update operation
type UpdateResult struct {
    commonResult
}

// DeleteResult represents the result of a Delete operation
type DeleteResult struct {
    commonResult
}
21 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/urls.go generated vendored Normal file
@@ -0,0 +1,21 @@
package users

import "github.com/gophercloud/gophercloud"

const (
    tenantPath = "tenants"
    userPath   = "v2.0/users"
    rolePath   = "roles"
)

func ResourceURL(c *gophercloud.ServiceClient, id string) string {
    return c.ServiceURL(userPath, id)
}

func rootURL(c *gophercloud.ServiceClient) string {
    return c.ServiceURL(userPath)
}

func listRolesURL(c *gophercloud.ServiceClient, tenantID, userID string) string {
    return c.ServiceURL(tenantPath, tenantID, userPath, userID, rolePath)
}
6 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go generated vendored Normal file
@@ -0,0 +1,6 @@
// Package tokens provides information and interaction with the token API
// resource for the OpenStack Identity service.
//
// For more information, see:
// http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3
package tokens
200 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go generated vendored Normal file
@@ -0,0 +1,200 @@
package tokens

import "github.com/gophercloud/gophercloud"

// Scope allows a created token to be limited to a specific domain or project.
type Scope struct {
    ProjectID   string
    ProjectName string
    DomainID    string
    DomainName  string
}

// AuthOptionsBuilder describes any argument that may be passed to the Create call.
type AuthOptionsBuilder interface {
    // ToTokenV3CreateMap assembles the Create request body, returning an error if parameters are
    // missing or inconsistent.
    ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error)
    ToTokenV3ScopeMap() (map[string]interface{}, error)
    CanReauth() bool
}

type AuthOptions struct {
    // IdentityEndpoint specifies the HTTP endpoint that is required to work with
    // the Identity API of the appropriate version. While it's ultimately needed by
    // all of the identity services, it will often be populated by a provider-level
    // function.
    IdentityEndpoint string `json:"-"`

    // Username is required if using Identity V2 API. Consult with your provider's
    // control panel to discover your account's username. In Identity V3, either
    // UserID or a combination of Username and DomainID or DomainName are needed.
    Username string `json:"username,omitempty"`
    UserID   string `json:"id,omitempty"`

    Password string `json:"password,omitempty"`

    // At most one of DomainID and DomainName must be provided if using Username
    // with Identity V3. Otherwise, either are optional.
    DomainID   string `json:"-"`
    DomainName string `json:"name,omitempty"`

    // AllowReauth should be set to true if you grant permission for Gophercloud to
    // cache your credentials in memory, and to allow Gophercloud to attempt to
    // re-authenticate automatically if/when your token expires. If you set it to
    // false, it will not cache these settings, but re-authentication will not be
    // possible. This setting defaults to false.
    AllowReauth bool `json:"-"`

    // TokenID allows users to authenticate (possibly as another user) with an
    // authentication token ID.
    TokenID string `json:"-"`

    Scope Scope `json:"-"`
}

func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) {
    gophercloudAuthOpts := gophercloud.AuthOptions{
        Username:    opts.Username,
        UserID:      opts.UserID,
        Password:    opts.Password,
        DomainID:    opts.DomainID,
        DomainName:  opts.DomainName,
        AllowReauth: opts.AllowReauth,
        TokenID:     opts.TokenID,
    }

    return gophercloudAuthOpts.ToTokenV3CreateMap(scope)
}

func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
    if opts.Scope.ProjectName != "" {
        // ProjectName provided: either DomainID or DomainName must also be supplied.
        // ProjectID may not be supplied.
        if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" {
            return nil, gophercloud.ErrScopeDomainIDOrDomainName{}
        }
        if opts.Scope.ProjectID != "" {
            return nil, gophercloud.ErrScopeProjectIDOrProjectName{}
        }

        if opts.Scope.DomainID != "" {
            // ProjectName + DomainID
            return map[string]interface{}{
                "project": map[string]interface{}{
                    "name":   &opts.Scope.ProjectName,
                    "domain": map[string]interface{}{"id": &opts.Scope.DomainID},
                },
            }, nil
        }

        if opts.Scope.DomainName != "" {
            // ProjectName + DomainName
            return map[string]interface{}{
                "project": map[string]interface{}{
                    "name":   &opts.Scope.ProjectName,
                    "domain": map[string]interface{}{"name": &opts.Scope.DomainName},
                },
            }, nil
        }
    } else if opts.Scope.ProjectID != "" {
        // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
        if opts.Scope.DomainID != "" {
            return nil, gophercloud.ErrScopeProjectIDAlone{}
        }
        if opts.Scope.DomainName != "" {
            return nil, gophercloud.ErrScopeProjectIDAlone{}
        }

        // ProjectID
        return map[string]interface{}{
            "project": map[string]interface{}{
                "id": &opts.Scope.ProjectID,
            },
        }, nil
    } else if opts.Scope.DomainID != "" {
        // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided.
        if opts.Scope.DomainName != "" {
            return nil, gophercloud.ErrScopeDomainIDOrDomainName{}
        }

        // DomainID
        return map[string]interface{}{
            "domain": map[string]interface{}{
                "id": &opts.Scope.DomainID,
            },
        }, nil
    } else if opts.Scope.DomainName != "" {
        return nil, gophercloud.ErrScopeDomainName{}
    }

    return nil, nil
}

func (opts *AuthOptions) CanReauth() bool {
    return opts.AllowReauth
}

func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string {
    return map[string]string{
        "X-Subject-Token": subjectToken,
    }
}

// Create authenticates and either generates a new token, or changes the Scope of an existing token.
func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) {
    scope, err := opts.ToTokenV3ScopeMap()
    if err != nil {
        r.Err = err
        return
    }

    b, err := opts.ToTokenV3CreateMap(scope)
    if err != nil {
        r.Err = err
        return
    }

    resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{
        MoreHeaders: map[string]string{"X-Auth-Token": ""},
    })
    r.Err = err
    if resp != nil {
        r.Header = resp.Header
    }
    return
}

// Get validates and retrieves information about another token.
func Get(c *gophercloud.ServiceClient, token string) (r GetResult) {
    resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{
        MoreHeaders: subjectTokenHeaders(c, token),
        OkCodes:     []int{200, 203},
    })
    if resp != nil {
        r.Err = err
        r.Header = resp.Header
    }
    return
}

// Validate determines if a specified token is valid or not.
func Validate(c *gophercloud.ServiceClient, token string) (bool, error) {
    resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{
        MoreHeaders: subjectTokenHeaders(c, token),
        OkCodes:     []int{200, 204, 404},
    })
    if err != nil {
        return false, err
    }

    return resp.StatusCode == 200 || resp.StatusCode == 204, nil
}

// Revoke immediately makes specified token invalid.
func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) {
    _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{
        MoreHeaders: subjectTokenHeaders(c, token),
    })
    return
}
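A sketch (not part of this change) of requesting a project-scoped Keystone v3 token with the package above. The user, domain, and project names are placeholders, and identity is assumed to be an Identity v3 ServiceClient; note that AuthOptions implements AuthOptionsBuilder with pointer receivers, so a *AuthOptions is passed to Create.

    package example

    import (
        "github.com/gophercloud/gophercloud"
        tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
    )

    // issueProjectScopedToken asks Keystone v3 for a token scoped to a project
    // in the Default domain and returns the parsed token.
    func issueProjectScopedToken(identity *gophercloud.ServiceClient) (*tokens3.Token, error) {
        opts := &tokens3.AuthOptions{
            Username:   "demo",
            Password:   "secret",
            DomainName: "Default",
            Scope: tokens3.Scope{
                ProjectName: "demo",
                DomainName:  "Default",
            },
        }
        return tokens3.Create(identity, opts).ExtractToken()
    }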
103 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go generated vendored Normal file
@@ -0,0 +1,103 @@
package tokens

import (
    "time"

    "github.com/gophercloud/gophercloud"
)

// Endpoint represents a single API endpoint offered by a service.
// It matches either a public, internal or admin URL.
// If supported, it contains a region specifier, again if provided.
// The significance of the Region field will depend upon your provider.
type Endpoint struct {
    ID        string `json:"id"`
    Region    string `json:"region"`
    Interface string `json:"interface"`
    URL       string `json:"url"`
}

// CatalogEntry provides a type-safe interface to an Identity API V3 service catalog listing.
// Each class of service, such as cloud DNS or block storage services, could have multiple
// CatalogEntry representing it (one by interface type, e.g public, admin or internal).
//
// Note: when looking for the desired service, try, whenever possible, to key off the type field.
// Otherwise, you'll tie the representation of the service to a specific provider.
type CatalogEntry struct {
    // Service ID
    ID string `json:"id"`
    // Name will contain the provider-specified name for the service.
    Name string `json:"name"`
    // Type will contain a type string if OpenStack defines a type for the service.
    // Otherwise, for provider-specific services, the provider may assign their own type strings.
    Type string `json:"type"`
    // Endpoints will let the caller iterate over all the different endpoints that may exist for
    // the service.
    Endpoints []Endpoint `json:"endpoints"`
}

// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
type ServiceCatalog struct {
    Entries []CatalogEntry `json:"catalog"`
}

// commonResult is the deferred result of a Create or a Get call.
type commonResult struct {
    gophercloud.Result
}

// Extract is a shortcut for ExtractToken.
// This function is deprecated and still present for backward compatibility.
func (r commonResult) Extract() (*Token, error) {
    return r.ExtractToken()
}

// ExtractToken interprets a commonResult as a Token.
func (r commonResult) ExtractToken() (*Token, error) {
    var s Token
    err := r.ExtractInto(&s)
    if err != nil {
        return nil, err
    }

    // Parse the token itself from the stored headers.
    s.ID = r.Header.Get("X-Subject-Token")

    return &s, err
}

// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token.
func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
    var s ServiceCatalog
    err := r.ExtractInto(&s)
    return &s, err
}

// CreateResult defers the interpretation of a created token.
// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog.
type CreateResult struct {
    commonResult
}

// GetResult is the deferred response from a Get call.
type GetResult struct {
    commonResult
}

// RevokeResult is the deferred response from a Revoke call.
type RevokeResult struct {
    commonResult
}

// Token is a string that grants a user access to a controlled set of services in an OpenStack provider.
// Each Token is valid for a set length of time.
type Token struct {
    // ID is the issued token.
    ID string `json:"id"`
    // ExpiresAt is the timestamp at which this token will no longer be accepted.
    ExpiresAt time.Time `json:"expires_at"`
}

func (r commonResult) ExtractInto(v interface{}) error {
    return r.ExtractIntoStructPtr(v, "token")
}
7 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go generated vendored Normal file
@@ -0,0 +1,7 @@
package tokens

import "github.com/gophercloud/gophercloud"

func tokenURL(c *gophercloud.ServiceClient) string {
    return c.ServiceURL("auth", "tokens")
}
114 vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go generated vendored Normal file
@@ -0,0 +1,114 @@
package utils

import (
    "fmt"
    "strings"

    "github.com/gophercloud/gophercloud"
)

// Version is a supported API version, corresponding to a vN package within the appropriate service.
type Version struct {
    ID       string
    Suffix   string
    Priority int
}

var goodStatus = map[string]bool{
    "current":   true,
    "supported": true,
    "stable":    true,
}

// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's
// published versions.
// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint.
func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) {
    type linkResp struct {
        Href string `json:"href"`
        Rel  string `json:"rel"`
    }

    type valueResp struct {
        ID     string     `json:"id"`
        Status string     `json:"status"`
        Links  []linkResp `json:"links"`
    }

    type versionsResp struct {
        Values []valueResp `json:"values"`
    }

    type response struct {
        Versions versionsResp `json:"versions"`
    }

    normalize := func(endpoint string) string {
        if !strings.HasSuffix(endpoint, "/") {
            return endpoint + "/"
        }
        return endpoint
    }
    identityEndpoint := normalize(client.IdentityEndpoint)

    // If a full endpoint is specified, check version suffixes for a match first.
    for _, v := range recognized {
        if strings.HasSuffix(identityEndpoint, v.Suffix) {
            return v, identityEndpoint, nil
        }
    }

    var resp response
    _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{
        JSONResponse: &resp,
        OkCodes:      []int{200, 300},
    })

    if err != nil {
        return nil, "", err
    }

    byID := make(map[string]*Version)
    for _, version := range recognized {
        byID[version.ID] = version
    }

    var highest *Version
    var endpoint string

    for _, value := range resp.Versions.Values {
        href := ""
        for _, link := range value.Links {
            if link.Rel == "self" {
                href = normalize(link.Href)
            }
        }

        if matching, ok := byID[value.ID]; ok {
            // Prefer a version that exactly matches the provided endpoint.
            if href == identityEndpoint {
                if href == "" {
                    return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase)
                }
                return matching, href, nil
            }

            // Otherwise, find the highest-priority version with a whitelisted status.
            if goodStatus[strings.ToLower(value.Status)] {
                if highest == nil || matching.Priority > highest.Priority {
                    highest = matching
                    endpoint = href
                }
            }
        }
    }

    if highest == nil {
        return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase)
    }
    if endpoint == "" {
        return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase)
    }

    return highest, endpoint, nil
}
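A sketch (not part of this change) of how ChooseVersion is typically driven, mirroring the pattern used by the openstack client helpers: declare the versions the caller supports and let the identity endpoint's version document pick one. The version IDs, suffixes, and Priority values here are illustrative assumptions; higher Priority wins.

    package example

    import (
        "fmt"

        "github.com/gophercloud/gophercloud"
        "github.com/gophercloud/gophercloud/openstack/utils"
    )

    // pickIdentityVersion negotiates between Identity v2.0 and v3 for an
    // already-constructed ProviderClient.
    func pickIdentityVersion(provider *gophercloud.ProviderClient) error {
        v20 := &utils.Version{ID: "v2.0", Priority: 20, Suffix: "/v2.0/"}
        v30 := &utils.Version{ID: "v3.0", Priority: 30, Suffix: "/v3/"}

        chosen, endpoint, err := utils.ChooseVersion(provider, []*utils.Version{v20, v30})
        if err != nil {
            return err
        }
        fmt.Printf("using identity %s at %s\n", chosen.ID, endpoint)
        return nil
    }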
60 vendor/github.com/gophercloud/gophercloud/pagination/http.go generated vendored Normal file
@@ -0,0 +1,60 @@
package pagination

import (
    "encoding/json"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"

    "github.com/gophercloud/gophercloud"
)

// PageResult stores the HTTP response that returned the current page of results.
type PageResult struct {
    gophercloud.Result
    url.URL
}

// PageResultFrom parses an HTTP response and returns a PageResult containing the
// results, interpreting the body as JSON if the Content-Type indicates it.
func PageResultFrom(resp *http.Response) (PageResult, error) {
    var parsedBody interface{}

    defer resp.Body.Close()
    rawBody, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return PageResult{}, err
    }

    if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") {
        err = json.Unmarshal(rawBody, &parsedBody)
        if err != nil {
            return PageResult{}, err
        }
    } else {
        parsedBody = rawBody
    }

    return PageResultFromParsed(resp, parsedBody), err
}

// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its
// body parsed as JSON (and closed).
func PageResultFromParsed(resp *http.Response, body interface{}) PageResult {
    return PageResult{
        Result: gophercloud.Result{
            Body:   body,
            Header: resp.Header,
        },
        URL: *resp.Request.URL,
    }
}

// Request performs an HTTP request and extracts the http.Response from the result.
func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) {
    return client.Get(url, nil, &gophercloud.RequestOpts{
        MoreHeaders: headers,
        OkCodes:     []int{200, 204, 300},
    })
}
92 vendor/github.com/gophercloud/gophercloud/pagination/linked.go generated vendored Normal file
@@ -0,0 +1,92 @@
package pagination

import (
    "fmt"
    "reflect"

    "github.com/gophercloud/gophercloud"
)

// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result.
type LinkedPageBase struct {
    PageResult

    // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer.
    // If any link along the path is missing, an empty URL will be returned.
    // If any link results in an unexpected value type, an error will be returned.
    // When left as "nil", []string{"links", "next"} will be used as a default.
    LinkPath []string
}

// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present.
// It assumes that the links are available in a "links" element of the top-level response object.
// If this is not the case, override NextPageURL on your result type.
func (current LinkedPageBase) NextPageURL() (string, error) {
    var path []string
    var key string

    if current.LinkPath == nil {
        path = []string{"links", "next"}
    } else {
        path = current.LinkPath
    }

    submap, ok := current.Body.(map[string]interface{})
    if !ok {
        err := gophercloud.ErrUnexpectedType{}
        err.Expected = "map[string]interface{}"
        err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
        return "", err
    }

    for {
        key, path = path[0], path[1:len(path)]

        value, ok := submap[key]
        if !ok {
            return "", nil
        }

        if len(path) > 0 {
            submap, ok = value.(map[string]interface{})
            if !ok {
                err := gophercloud.ErrUnexpectedType{}
                err.Expected = "map[string]interface{}"
                err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value))
                return "", err
            }
        } else {
            if value == nil {
                // Actual null element.
                return "", nil
            }

            url, ok := value.(string)
            if !ok {
                err := gophercloud.ErrUnexpectedType{}
                err.Expected = "string"
                err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value))
                return "", err
            }

            return url, nil
        }
    }
}

// IsEmpty satisfies the IsEmpty method of the Page interface
func (current LinkedPageBase) IsEmpty() (bool, error) {
    if b, ok := current.Body.([]interface{}); ok {
        return len(b) == 0, nil
    }
    err := gophercloud.ErrUnexpectedType{}
    err.Expected = "[]interface{}"
    err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
    return true, err
}

// GetBody returns the linked page's body. This method is needed to satisfy the
// Page interface.
func (current LinkedPageBase) GetBody() interface{} {
    return current.Body
}
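A small sketch (not part of this change) of the LinkPath mechanism documented above: a hypothetical page type whose "next" link lives under a non-default key. The widget names are illustrative only; the TenantPage type earlier in this diff shows the alternative of overriding NextPageURL instead.

    package example

    import (
        "github.com/gophercloud/gophercloud/pagination"
    )

    // widgetPage is a hypothetical page type whose "next" link is found under
    // "widgets_links"/"next" rather than the default "links"/"next"; setting
    // LinkPath tells LinkedPageBase.NextPageURL where to look.
    type widgetPage struct {
        pagination.LinkedPageBase
    }

    // newWidgetPage is the page constructor that would be handed to
    // pagination.NewPager.
    func newWidgetPage(r pagination.PageResult) pagination.Page {
        return widgetPage{pagination.LinkedPageBase{
            PageResult: r,
            LinkPath:   []string{"widgets_links", "next"},
        }}
    }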
58 vendor/github.com/gophercloud/gophercloud/pagination/marker.go generated vendored Normal file
@@ -0,0 +1,58 @@
package pagination

import (
    "fmt"
    "reflect"

    "github.com/gophercloud/gophercloud"
)

// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager.
// For convenience, embed the MarkedPageBase struct.
type MarkerPage interface {
    Page

    // LastMarker returns the last "marker" value on this page.
    LastMarker() (string, error)
}

// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters.
type MarkerPageBase struct {
    PageResult

    // Owner is a reference to the embedding struct.
    Owner MarkerPage
}

// NextPageURL generates the URL for the page of results after this one.
func (current MarkerPageBase) NextPageURL() (string, error) {
    currentURL := current.URL

    mark, err := current.Owner.LastMarker()
    if err != nil {
        return "", err
    }

    q := currentURL.Query()
    q.Set("marker", mark)
    currentURL.RawQuery = q.Encode()

    return currentURL.String(), nil
}

// IsEmpty satisfies the IsEmpty method of the Page interface
func (current MarkerPageBase) IsEmpty() (bool, error) {
    if b, ok := current.Body.([]interface{}); ok {
        return len(b) == 0, nil
    }
    err := gophercloud.ErrUnexpectedType{}
    err.Expected = "[]interface{}"
    err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
    return true, err
}

// GetBody returns the marker page's body. This method is needed to satisfy the
// Page interface.
func (current MarkerPageBase) GetBody() interface{} {
    return current.Body
}
Some files were not shown because too many files have changed in this diff.