From 1ebef0984de520860b980ddd5d9b232bf6653614 Mon Sep 17 00:00:00 2001 From: Dmitry Rozhkov Date: Wed, 20 Jun 2018 09:22:42 +0300 Subject: [PATCH] bring in deps required for getting node's annotations --- Gopkg.lock | 43 +- Gopkg.toml | 4 + vendor/github.com/davecgh/go-spew/.gitignore | 22 + vendor/github.com/davecgh/go-spew/.travis.yml | 14 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + vendor/github.com/davecgh/go-spew/README.md | 205 + .../github.com/davecgh/go-spew/cov_report.sh | 22 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../davecgh/go-spew/spew/common_test.go | 298 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 ++ .../davecgh/go-spew/spew/dump_test.go | 1042 +++ .../davecgh/go-spew/spew/dumpcgo_test.go | 99 + .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 + .../davecgh/go-spew/spew/example_test.go | 226 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../davecgh/go-spew/spew/format_test.go | 1558 ++++ .../davecgh/go-spew/spew/internal_test.go | 87 + .../go-spew/spew/internalunsafe_test.go | 102 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + .../davecgh/go-spew/spew/spew_test.go | 320 + .../davecgh/go-spew/test_coverage.txt | 61 + .../apiextensions-apiserver/CONTRIBUTING.md | 7 + vendor/k8s.io/apiextensions-apiserver/LICENSE | 202 + vendor/k8s.io/apiextensions-apiserver/OWNERS | 9 + .../k8s.io/apiextensions-apiserver/README.md | 20 + .../SECURITY_CONTACTS | 0 .../code-of-conduct.md | 3 + vendor/k8s.io/apiextensions-apiserver/main.go | 40 + .../pkg/features/OWNERS | 2 + .../pkg/features/kube_features.go | 55 + .../k8s.io/apimachinery/pkg/api/errors/BUILD | 47 + vendor/k8s.io/apimachinery/pkg/api/meta/BUILD | 72 + .../apimachinery/pkg/api/meta/interfaces.go | 21 +- .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 25 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 3 + .../apimachinery/pkg/api/meta/priority.go | 30 +- .../pkg/api/meta/priority_test.go | 63 - .../apimachinery/pkg/api/meta/restmapper.go | 38 +- .../pkg/api/meta/restmapper_test.go | 95 +- .../apimachinery/pkg/api/meta/unstructured.go | 47 + .../apimachinery/pkg/api/resource/BUILD | 69 + .../pkg/api/resource/generated.pb.go | 2 +- .../pkg/api/resource/generated.proto | 2 +- .../apimachinery/pkg/api/resource/quantity.go | 42 + .../pkg/api/resource/quantity_test.go | 16 + .../pkg/api/resource/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/internalversion/BUILD | 59 + .../apis/meta/internalversion/conversion.go | 77 + .../pkg/apis/meta/internalversion/doc.go | 20 + .../pkg/apis/meta/internalversion/register.go | 105 + .../meta/internalversion/register_test.go | 89 + .../roundtrip_test.go} | 16 +- .../pkg/apis/meta/internalversion/types.go | 70 + .../zz_generated.conversion.go | 118 + .../internalversion/zz_generated.deepcopy.go | 106 + .../apimachinery/pkg/apis/meta/v1/BUILD | 101 + .../pkg/apis/meta/v1/conversion.go | 12 - .../pkg/apis/meta/v1/conversion_test.go | 35 - .../apimachinery/pkg/apis/meta/v1/duration.go | 5 +- .../pkg/apis/meta/v1/generated.pb.go | 2 +- .../pkg/apis/meta/v1/generated.proto | 3 +- .../pkg/apis/meta/v1/micro_time.go | 5 +- .../apimachinery/pkg/apis/meta/v1/time.go | 5 +- .../apimachinery/pkg/apis/meta/v1/types.go | 3 +- .../meta/v1/types_swagger_doc_generated.go | 4 +- .../pkg/apis/meta/v1/unstructured/BUILD | 53 + 
.../pkg/apis/meta/v1/unstructured/helpers.go | 66 +- .../apis/meta/v1/unstructured/helpers_test.go | 76 - .../apis/meta/v1/unstructured/unstructured.go | 24 +- .../meta/v1/unstructured/unstructured_list.go | 17 +- .../v1/unstructured/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 2 +- .../pkg/apis/meta/v1/zz_generated.defaults.go | 2 +- .../apimachinery/pkg/apis/meta/v1beta1/BUILD | 45 + .../pkg/apis/meta/v1beta1/deepcopy.go | 23 +- .../pkg/apis/meta/v1beta1/generated.pb.go | 2 +- .../pkg/apis/meta/v1beta1/generated.proto | 2 +- .../pkg/apis/meta/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../meta/v1beta1/zz_generated.deepcopy.go | 2 +- .../meta/v1beta1/zz_generated.defaults.go | 2 +- .../k8s.io/apimachinery/pkg/conversion/BUILD | 49 + .../pkg/conversion/queryparams/BUILD | 39 + .../pkg/conversion/queryparams/convert.go | 3 - .../conversion/queryparams/convert_test.go | 6 +- vendor/k8s.io/apimachinery/pkg/fields/BUILD | 41 + .../apimachinery/pkg/fields/selector.go | 21 - vendor/k8s.io/apimachinery/pkg/labels/BUILD | 50 + .../pkg/labels/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/apimachinery/pkg/runtime/BUILD | 98 + .../apimachinery/pkg/runtime/converter.go | 26 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 8 - .../apimachinery/pkg/runtime/generated.pb.go | 2 +- .../apimachinery/pkg/runtime/generated.proto | 2 +- .../apimachinery/pkg/runtime/interfaces.go | 17 +- .../pkg/runtime/local_scheme_test.go | 150 - .../apimachinery/pkg/runtime/schema/BUILD | 43 + .../pkg/runtime/schema/generated.pb.go | 2 +- .../pkg/runtime/schema/generated.proto | 2 +- .../pkg/runtime/schema/group_version_test.go | 44 - .../k8s.io/apimachinery/pkg/runtime/scheme.go | 146 +- .../apimachinery/pkg/runtime/serializer/BUILD | 64 + .../pkg/runtime/serializer/json/BUILD | 55 + .../pkg/runtime/serializer/json/json.go | 5 + .../pkg/runtime/serializer/protobuf/BUILD | 35 + .../pkg/runtime/serializer/recognizer/BUILD | 32 + .../pkg/runtime/serializer/sparse_test.go | 91 - .../pkg/runtime/serializer/streaming/BUILD | 41 + .../pkg/runtime/serializer/versioning/BUILD | 41 + .../serializer/versioning/versioning.go | 23 +- .../serializer/versioning/versioning_test.go | 16 +- .../pkg/runtime/zz_generated.deepcopy.go | 2 +- .../k8s.io/apimachinery/pkg/selection/BUILD | 25 + vendor/k8s.io/apimachinery/pkg/types/BUILD | 31 + .../apimachinery/pkg/types/namespacedname.go | 17 + .../k8s.io/apimachinery/pkg/util/clock/BUILD | 32 + .../apimachinery/pkg/util/clock/clock.go | 63 +- .../apimachinery/pkg/util/clock/clock_test.go | 18 +- .../k8s.io/apimachinery/pkg/util/errors/BUILD | 35 + .../k8s.io/apimachinery/pkg/util/framer/BUILD | 32 + .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 47 + .../pkg/util/intstr/generated.pb.go | 2 +- .../pkg/util/intstr/generated.proto | 2 +- .../k8s.io/apimachinery/pkg/util/json/BUILD | 32 + .../apimachinery/pkg/util/mergepatch/BUILD | 39 + .../apimachinery/pkg/util/mergepatch/OWNERS | 5 + .../pkg/util/mergepatch/errors.go | 102 + .../apimachinery/pkg/util/mergepatch/util.go | 133 + .../pkg/util/mergepatch/util_test.go | 138 + vendor/k8s.io/apimachinery/pkg/util/net/BUILD | 50 + .../k8s.io/apimachinery/pkg/util/net/http.go | 12 +- .../apimachinery/pkg/util/net/port_range.go | 68 +- .../pkg/util/net/port_range_test.go | 12 +- .../apimachinery/pkg/util/runtime/BUILD | 33 + .../apimachinery/pkg/util/runtime/runtime.go | 10 +- .../k8s.io/apimachinery/pkg/util/sets/BUILD | 71 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 6 +- 
.../k8s.io/apimachinery/pkg/util/sets/doc.go | 4 +- .../apimachinery/pkg/util/sets/empty.go | 4 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 6 +- .../apimachinery/pkg/util/sets/int64.go | 6 +- .../apimachinery/pkg/util/sets/string.go | 6 +- .../pkg/util/strategicpatch/BUILD | 60 + .../pkg/util/strategicpatch/OWNERS | 5 + .../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 + .../pkg/util/strategicpatch/patch.go | 2151 ++++++ .../pkg/util/strategicpatch/patch_test.go | 6763 +++++++++++++++++ .../pkg/util/strategicpatch/types.go | 193 + .../apimachinery/pkg/util/validation/BUILD | 37 + .../pkg/util/validation/field/BUILD | 42 + .../k8s.io/apimachinery/pkg/util/wait/BUILD | 37 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 22 +- .../k8s.io/apimachinery/pkg/util/yaml/BUILD | 36 + vendor/k8s.io/apimachinery/pkg/version/BUILD | 28 + .../apimachinery/pkg/version/helpers.go | 88 - .../apimachinery/pkg/version/helpers_test.go | 52 - vendor/k8s.io/apimachinery/pkg/watch/BUILD | 69 + .../k8s.io/apimachinery/pkg/watch/filter.go | 6 +- vendor/k8s.io/apimachinery/pkg/watch/mux.go | 6 +- .../pkg/watch/zz_generated.deepcopy.go | 2 +- .../third_party/forked/golang/json/BUILD | 32 + .../third_party/forked/golang/json/OWNERS | 5 + .../third_party/forked/golang/json/fields.go | 513 ++ .../forked/golang/json/fields_test.go | 30 + .../third_party/forked/golang/reflect/BUILD | 32 + .../forked/golang/reflect/deep_equal.go | 2 +- vendor/k8s.io/apiserver/.import-restrictions | 10 + vendor/k8s.io/apiserver/CONTRIBUTING.md | 7 + vendor/k8s.io/apiserver/LICENSE | 202 + vendor/k8s.io/apiserver/OWNERS | 19 + vendor/k8s.io/apiserver/README.md | 30 + vendor/k8s.io/apiserver/SECURITY_CONTACTS | 17 + vendor/k8s.io/apiserver/code-of-conduct.md | 3 + vendor/k8s.io/apiserver/pkg/features/OWNERS | 2 + .../apiserver/pkg/features/kube_features.go | 81 + .../pkg/util/feature/feature_gate.go | 347 + .../pkg/util/feature/feature_gate_test.go | 320 + vendor/k8s.io/kube-openapi/.gitignore | 20 + vendor/k8s.io/kube-openapi/.travis.yml | 4 + vendor/k8s.io/kube-openapi/LICENSE | 202 + vendor/k8s.io/kube-openapi/OWNERS | 11 + vendor/k8s.io/kube-openapi/README.md | 14 + vendor/k8s.io/kube-openapi/code-of-conduct.md | 3 + .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 299 + .../kube-openapi/pkg/util/proto/openapi.go | 276 + .../pkg/util/proto/openapi_suite_test.go | 49 + .../pkg/util/proto/openapi_test.go | 265 + vendor/k8s.io/kube-openapi/pkg/util/trie.go | 79 + vendor/k8s.io/kube-openapi/pkg/util/util.go | 39 + .../k8s.io/kube-openapi/pkg/util/util_test.go | 37 + vendor/k8s.io/kubernetes/pkg/api/OWNERS | 4 + vendor/k8s.io/kubernetes/pkg/apis/OWNERS | 44 + vendor/k8s.io/kubernetes/pkg/apis/core/BUILD | 62 + vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS | 43 + .../pkg/apis/core/annotation_key_constants.go | 92 + vendor/k8s.io/kubernetes/pkg/apis/core/doc.go | 24 + .../pkg/apis/core/field_constants.go | 38 + .../k8s.io/kubernetes/pkg/apis/core/json.go | 28 + .../pkg/apis/core/objectreference.go | 34 + .../kubernetes/pkg/apis/core/register.go | 99 + .../kubernetes/pkg/apis/core/resource.go | 62 + .../k8s.io/kubernetes/pkg/apis/core/taint.go | 36 + .../kubernetes/pkg/apis/core/taint_test.go | 120 + .../kubernetes/pkg/apis/core/toleration.go | 30 + .../pkg/apis/core/toleration_test.go | 136 + .../k8s.io/kubernetes/pkg/apis/core/types.go | 4669 ++++++++++++ .../pkg/apis/core/zz_generated.deepcopy.go | 5970 +++++++++++++++ 
vendor/k8s.io/kubernetes/pkg/features/BUILD | 30 + .../kubernetes/pkg/features/kube_features.go | 341 + vendor/k8s.io/kubernetes/pkg/util/BUILD | 77 + vendor/k8s.io/kubernetes/pkg/util/node/BUILD | 48 + .../k8s.io/kubernetes/pkg/util/node/node.go | 203 + .../kubernetes/pkg/util/node/node_test.go | 91 + .../kubernetes/pkg/util/verify-util-pkg.sh | 48 + 220 files changed, 34482 insertions(+), 1196 deletions(-) create mode 100644 vendor/github.com/davecgh/go-spew/.gitignore create mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/README.md create mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go create mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt create mode 100644 vendor/k8s.io/apiextensions-apiserver/CONTRIBUTING.md create mode 100644 vendor/k8s.io/apiextensions-apiserver/LICENSE create mode 100644 vendor/k8s.io/apiextensions-apiserver/OWNERS create mode 100644 vendor/k8s.io/apiextensions-apiserver/README.md rename vendor/k8s.io/{apimachinery => apiextensions-apiserver}/SECURITY_CONTACTS (100%) create mode 100644 vendor/k8s.io/apiextensions-apiserver/code-of-conduct.md create mode 100644 vendor/k8s.io/apiextensions-apiserver/main.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register_test.go rename vendor/k8s.io/apimachinery/pkg/apis/meta/{v1/unstructured/unstructured_test.go => internalversion/roundtrip_test.go} (62%) create mode 100644 
vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/BUILD delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/local_scheme_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/sparse_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/selection/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/types/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/version/BUILD delete mode 100644 
vendor/k8s.io/apimachinery/pkg/version/helpers.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/version/helpers_test.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/BUILD create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields_test.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD create mode 100644 vendor/k8s.io/apiserver/.import-restrictions create mode 100644 vendor/k8s.io/apiserver/CONTRIBUTING.md create mode 100644 vendor/k8s.io/apiserver/LICENSE create mode 100644 vendor/k8s.io/apiserver/OWNERS create mode 100644 vendor/k8s.io/apiserver/README.md create mode 100644 vendor/k8s.io/apiserver/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/apiserver/code-of-conduct.md create mode 100644 vendor/k8s.io/apiserver/pkg/features/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go create mode 100644 vendor/k8s.io/kube-openapi/.gitignore create mode 100644 vendor/k8s.io/kube-openapi/.travis.yml create mode 100644 vendor/k8s.io/kube-openapi/LICENSE create mode 100755 vendor/k8s.io/kube-openapi/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/README.md create mode 100644 vendor/k8s.io/kube-openapi/code-of-conduct.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_suite_test.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_test.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/trie.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/util.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/util_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/json.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/resource.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/taint.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/taint_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/toleration_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kubernetes/pkg/features/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/features/kube_features.go 
create mode 100644 vendor/k8s.io/kubernetes/pkg/util/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/node/BUILD create mode 100644 vendor/k8s.io/kubernetes/pkg/util/node/node.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/node/node_test.go create mode 100755 vendor/k8s.io/kubernetes/pkg/util/verify-util-pkg.sh diff --git a/Gopkg.lock b/Gopkg.lock index 8a5f0eb6..19265495 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,12 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + [[projects]] name = "github.com/ghodss/yaml" packages = ["."] @@ -230,11 +236,17 @@ [[projects]] branch = "master" + name = "k8s.io/apiextensions-apiserver" + packages = ["pkg/features"] + revision = "003acb65be70b07acafe3b5eb4f13a6d0fe47154" + +[[projects]] name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", + "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", "pkg/apis/meta/v1beta1", @@ -257,18 +269,31 @@ "pkg/util/framer", "pkg/util/intstr", "pkg/util/json", + "pkg/util/mergepatch", "pkg/util/net", "pkg/util/runtime", "pkg/util/sets", + "pkg/util/strategicpatch", "pkg/util/validation", "pkg/util/validation/field", "pkg/util/wait", "pkg/util/yaml", "pkg/version", "pkg/watch", + "third_party/forked/golang/json", "third_party/forked/golang/reflect" ] - revision = "5a0ac3ef2591b5fbf8659ab72e91c3bfac620615" + revision = "302974c03f7e50f16561ba237db776ab93594ef6" + version = "kubernetes-1.10.0" + +[[projects]] + branch = "master" + name = "k8s.io/apiserver" + packages = [ + "pkg/features", + "pkg/util/feature" + ] + revision = "8f8ecf0776308a699047c16cd6fc9d98cd3e6505" [[projects]] name = "k8s.io/client-go" @@ -326,15 +351,27 @@ revision = "23781f4d6632d88e869066eaebb743857aa1ef9b" version = "v7.0.0" +[[projects]] + branch = "master" + name = "k8s.io/kube-openapi" + packages = ["pkg/util/proto"] + revision = "bf40560368791a7dddfeea9b3cfcf89b34139f44" + [[projects]] name = "k8s.io/kubernetes" - packages = ["pkg/kubelet/apis/deviceplugin/v1beta1"] + packages = [ + "pkg/apis/core", + "pkg/features", + "pkg/kubelet/apis", + "pkg/kubelet/apis/deviceplugin/v1beta1", + "pkg/util/node" + ] revision = "fc32d2f3698e36b93322a3465f63a14e9f0eaead" version = "v1.10.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "481e372f64cc48abd8548d0f3668bda0d9cc72b26c8b2544861fcc1305e9496f" + inputs-digest = "dbcc6386ef763c4ddca92c5477abfc19a5ea44c5060c7894e1f1194245a89306" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index d145230e..fbcde93e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -36,3 +36,7 @@ [[constraint]] name = "k8s.io/kubernetes" version = "1.10.0" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.10.0" diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml 
b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 00000000..984e0736 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 00000000..c8364161 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 00000000..26243044 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+ +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) +str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. + +```Go +package main + +import ( + "fmt" + "html" + "net/http" + + "github.com/davecgh/go-spew/spew" +) + +func handler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) + fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->") +} + +func main() { + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) <nil> + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*><shown>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options.
See the ConfigState documentation for more details. + +``` +* Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. This option + relies on access to the unsafe package, so it will not have any effect when + running in environments without access to the unsafe package such as Google + App Engine or with the "safe" build tag specified. + Pointer method invocation is enabled by default. + +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are supported, + with other types sorted according to the reflect.Value.String() output + which guarantees display stability. Natural map order is used by + default. + +* SpewKeys + SpewKeys specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only considered + if SortKeys is true. + +``` + +## Unsafe Package Dependency + +This package relies on the unsafe package to perform some of the more advanced +features; however, it also supports a "limited" mode which allows it to work in +environments where the unsafe package is not available. By default, it will +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. + +## License + +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 00000000..9579497e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool may be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed.
+if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 00000000..8a4a6589 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. They are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field.
When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) + if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000..1fe3cf3d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000..7c519ff4 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package.
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("<nil>") + maxNewlineBytes = []byte("<max depth reached>\n") + maxShortBytes = []byte("<max>") + circularBytes = []byte("<already shown>") + circularShortBytes = []byte("<shown>") + invalidAngleBytes = []byte("<invalid>") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputs the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisfy an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64 bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len as values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case.
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valuesSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 00000000..0f5ce47d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// custom type to test Stringer interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stringer interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. +type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing that spew +// properly handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. +func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +type sortableStruct struct { + x int +} + +func (ss sortableStruct) String() string { + return fmt.Sprintf("ss.%d", ss.x) +} + +type unsortableStruct struct { + x int +} + +type sortTestCase struct { + input []reflect.Value + expected []reflect.Value +} + +func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + for _, test := range tests { + spew.SortValues(test.input, cs) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{} + // instead.
+		input := getInterfaces(test.input)
+		expected := getInterfaces(test.expected)
+		if !reflect.DeepEqual(input, expected) {
+			t.Errorf("Sort mismatch:\n %v != %v", input, expected)
+		}
+	}
+}
+
+// TestSortValues ensures the sort functionality for reflect.Value based sorting
+// works as intended.
+func TestSortValues(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	embedA := v(embed{"a"})
+	embedB := v(embed{"b"})
+	embedC := v(embed{"c"})
+	tests := []sortTestCase{
+		// No values.
+		{
+			[]reflect.Value{},
+			[]reflect.Value{},
+		},
+		// Bools.
+		{
+			[]reflect.Value{v(false), v(true), v(false)},
+			[]reflect.Value{v(false), v(false), v(true)},
+		},
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Uints.
+		{
+			[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
+			[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
+		},
+		// Floats.
+		{
+			[]reflect.Value{v(2.0), v(1.0), v(3.0)},
+			[]reflect.Value{v(1.0), v(2.0), v(3.0)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// Array
+		{
+			[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
+			[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
+		},
+		// Uintptrs.
+		{
+			[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
+			[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
+		},
+		// SortableStructs.
+		{
+			// Note: not sorted - DisableMethods is set.
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			// Note: not sorted - SpewKeys is false.
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+		},
+		// Invalid.
+		{
+			[]reflect.Value{embedB, embedA, embedC},
+			[]reflect.Value{embedB, embedA, embedC},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
+	helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
+// based sorting works as intended when using string methods.
+func TestSortValuesWithMethods(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	tests := []sortTestCase{
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// SortableStructs.
+		{
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			// Note: not sorted - SpewKeys is false.
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
+	helpTestSortValues(tests, &cs, t)
+}
+
+// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
+// based sorting works as intended when using spew to stringify keys.
+func TestSortValuesWithSpew(t *testing.T) {
+	v := reflect.ValueOf
+
+	a := v("a")
+	b := v("b")
+	c := v("c")
+	tests := []sortTestCase{
+		// Ints.
+		{
+			[]reflect.Value{v(2), v(1), v(3)},
+			[]reflect.Value{v(1), v(2), v(3)},
+		},
+		// Strings.
+		{
+			[]reflect.Value{b, a, c},
+			[]reflect.Value{a, b, c},
+		},
+		// SortableStructs.
+		{
+			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
+			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
+		},
+		// UnsortableStructs.
+		{
+			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
+			[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
+		},
+	}
+	cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
+	helpTestSortValues(tests, &cs, t)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use sets this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value; however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Print, c.Printf, or c.Println.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//	Indent: " "
+//	MaxDepth: 0
+//	DisableMethods: false
+//	DisablePointerMethods: false
+//	ContinueOnMethod: false
+//	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
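+
+For instance, dumping a small two-field struct (this sketch mirrors the s1
+struct exercised in spew's own dump tests; here the type is assumed to be
+declared in package main):
+
+	type s1 struct {
+		a int8
+		b uint8
+	}
+	spew.Dump(s1{127, 255})
+
+which, with the default single-space indent, prints each field with its type,
+one nesting level deep:
+
+	(main.s1) {
+	 a: (int8) 127,
+	 b: (uint8) 255
+	}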
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc. with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows different configuration
+options to be used concurrently. See the ConfigState documentation for more
+details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default. A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output. Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability. Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings. This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Print, spew.Printf, or spew.Println. The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..df1d582a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. 
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
+ if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. 
See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 00000000..5aad9c7a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. + +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// dumpTest is used to describe a test to be performed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. 
+var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. 
+ v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. + v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. 
+ v := [3]int{1, 2, 3} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := [3]pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + + ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + + ") (len=" + v2i2Len + ") " + "stringer 3\n}" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + + v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + + ") " + "\"3\"\n}" + } + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. + v3i0 := "one" + v3 := [3]interface{}{v3i0, int(2), uint(3)} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. + v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. 
+ v := []float32{3.14, 6.28, 12.56} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := []pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + + v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + + ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + + "stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. + v3i0 := "one" + v3 := []interface{}{v3i0, int(2), uint(3), nil} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. + v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. 
+	v5 := []int(nil)
+	nv5 := (*[]int)(nil)
+	pv5 := &v5
+	v5Addr := fmt.Sprintf("%p", pv5)
+	pv5Addr := fmt.Sprintf("%p", &pv5)
+	v5t := "[]int"
+	v5s := "<nil>"
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
+	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
+	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
+}
+
+func addStringDumpTests() {
+	// Standard string.
+	v := "test"
+	vLen := fmt.Sprintf("%d", len(v))
+	nv := (*string)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "string"
+	vs := "(len=" + vLen + ") \"test\""
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addInterfaceDumpTests() {
+	// Nil interface.
+	var v interface{}
+	nv := (*interface{})(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "interface {}"
+	vs := "<nil>"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+
+	// Sub-interface.
+	v2 := interface{}(uint16(65535))
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "uint16"
+	v2s := "65535"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
+}
+
+func addMapDumpTests() {
+	// Map with string keys and int vals.
+	k := "one"
+	kk := "two"
+	m := map[string]int{k: 1, kk: 2}
+	klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
+	kkLen := fmt.Sprintf("%d", len(kk))
+	mLen := fmt.Sprintf("%d", len(m))
+	nilMap := map[string]int(nil)
+	nm := (*map[string]int)(nil)
+	pm := &m
+	mAddr := fmt.Sprintf("%p", pm)
+	pmAddr := fmt.Sprintf("%p", &pm)
+	mt := "map[string]int"
+	mt1 := "string"
+	mt2 := "int"
+	ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
+		"\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
+		") \"two\": (" + mt2 + ") 2\n}"
+	ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
+		"\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
+		") \"one\": (" + mt2 + ") 1\n}"
+	addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
+	addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
+		"(*"+mt+")("+mAddr+")("+ms2+")\n")
+	addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
+		"(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
+	addDumpTest(nm, "(*"+mt+")(<nil>)\n")
+	addDumpTest(nilMap, "("+mt+") <nil>\n")
+
+	// Map with custom formatter type on pointer receiver only keys and vals.
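+	// (pstringer implements fmt.Stringer on its pointer receiver only, so the
+	// "stringer ..." renderings below require spew's unsafe field access; the
+	// spew.UnsafeDisabled branch covers builds where that access is off and
+	// the plain quoted values are expected instead.)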
+ k2 := pstringer("one") + v2 := pstringer("1") + m2 := map[pstringer]pstringer{k2: v2} + k2Len := fmt.Sprintf("%d", len(k2)) + v2Len := fmt.Sprintf("%d", len(v2)) + m2Len := fmt.Sprintf("%d", len(m2)) + nilMap2 := map[pstringer]pstringer(nil) + nm2 := (*map[pstringer]pstringer)(nil) + pm2 := &m2 + m2Addr := fmt.Sprintf("%p", pm2) + pm2Addr := fmt.Sprintf("%p", &pm2) + m2t := "map[spew_test.pstringer]spew_test.pstringer" + m2t1 := "spew_test.pstringer" + m2t2 := "spew_test.pstringer" + m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + + "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" + if spew.UnsafeDisabled { + m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + + ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + + ") \"1\"\n}" + } + addDumpTest(m2, "("+m2t+") "+m2s+"\n") + addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") + addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") + addDumpTest(nm2, "(*"+m2t+")()\n") + addDumpTest(nilMap2, "("+m2t+") \n") + + // Map with interface keys and values. + k3 := "one" + k3Len := fmt.Sprintf("%d", len(k3)) + m3 := map[interface{}]interface{}{k3: 1} + m3Len := fmt.Sprintf("%d", len(m3)) + nilMap3 := map[interface{}]interface{}(nil) + nm3 := (*map[interface{}]interface{})(nil) + pm3 := &m3 + m3Addr := fmt.Sprintf("%p", pm3) + pm3Addr := fmt.Sprintf("%p", &pm3) + m3t := "map[interface {}]interface {}" + m3t1 := "string" + m3t2 := "int" + m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + + "\"one\": (" + m3t2 + ") 1\n}" + addDumpTest(m3, "("+m3t+") "+m3s+"\n") + addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") + addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") + addDumpTest(nm3, "(*"+m3t+")()\n") + addDumpTest(nilMap3, "("+m3t+") \n") + + // Map with nil interface value. + k4 := "nil" + k4Len := fmt.Sprintf("%d", len(k4)) + m4 := map[string]interface{}{k4: nil} + m4Len := fmt.Sprintf("%d", len(m4)) + nilMap4 := map[string]interface{}(nil) + nm4 := (*map[string]interface{})(nil) + pm4 := &m4 + m4Addr := fmt.Sprintf("%p", pm4) + pm4Addr := fmt.Sprintf("%p", &pm4) + m4t := "map[string]interface {}" + m4t1 := "string" + m4t2 := "interface {}" + m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + + " \"nil\": (" + m4t2 + ") \n}" + addDumpTest(m4, "("+m4t+") "+m4s+"\n") + addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") + addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") + addDumpTest(nm4, "(*"+m4t+")()\n") + addDumpTest(nilMap4, "("+m4t+") \n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + + ") (len=5) stringer test2\n}" + v3sp := v3s + if spew.UnsafeDisabled { + v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) \"test2\"\n}" + v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) stringer test2\n}" + } + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + eLen := fmt.Sprintf("%d", len("embedstr")) + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + + ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + + " \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. 
+ v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. + v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. + v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. 
+	type circular struct {
+		c *circular
+	}
+	v := circular{nil}
+	v.c = &v
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.circular"
+	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" +
+		vAddr + ")(<already shown>)\n })\n}"
+	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
+
+	// Structs that are circular through cross referencing.
+	v2 := xref1{nil}
+	ts2 := xref2{&v2}
+	v2.ps2 = &ts2
+	pv2 := &v2
+	ts2Addr := fmt.Sprintf("%p", &ts2)
+	v2Addr := fmt.Sprintf("%p", pv2)
+	pv2Addr := fmt.Sprintf("%p", &pv2)
+	v2t := "spew_test.xref1"
+	v2t2 := "spew_test.xref2"
+	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
+		")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr +
+		")(<already shown>)\n })\n })\n}"
+	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
+		")(" + v2Addr + ")(<already shown>)\n })\n}"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
+	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
+
+	// Structs that are indirectly circular.
+	v3 := indirCir1{nil}
+	tic2 := indirCir2{nil}
+	tic3 := indirCir3{&v3}
+	tic2.ps3 = &tic3
+	v3.ps2 = &tic2
+	pv3 := &v3
+	tic2Addr := fmt.Sprintf("%p", &tic2)
+	tic3Addr := fmt.Sprintf("%p", &tic3)
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "spew_test.indirCir1"
+	v3t2 := "spew_test.indirCir2"
+	v3t3 := "spew_test.indirCir3"
+	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
+		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
+		")({\n ps2: (*" + v3t2 + ")(" + tic2Addr +
+		")(<already shown>)\n })\n })\n })\n}"
+	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
+		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
+		")(<already shown>)\n })\n })\n}"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
+	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
+	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
+}
+
+func addPanicDumpTests() {
+	// Type that panics in its Stringer interface.
+	v := panicer(127)
+	nv := (*panicer)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.panicer"
+	vs := "(PANIC=test panic)127"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+func addErrorDumpTests() {
+	// Type that has a custom Error interface.
+	v := customError(127)
+	nv := (*customError)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.customError"
+	vs := "error: 127"
+	addDumpTest(v, "("+vt+") "+vs+"\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
+	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
+}
+
+// TestDump executes all of the tests described by dumpTests.
+func TestDump(t *testing.T) {
+	// Set up tests.
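+	// Each helper below registers entries in the package-level dumpTests
+	// slice via addDumpTest. Tests whose output is nondeterministic (map
+	// iteration order, for example) register several acceptable want
+	// strings, and the comparison passes if any one of them matches.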
+ addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + + "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + + "(len=1) \"3\"\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "(map[spew_test.stringer]int) (len=3) {\n" + + "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if spew.UnsafeDisabled { + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + + "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + + "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + + "}\n" + } + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "(map[spew_test.customError]int) (len=3) {\n" + + "(spew_test.customError) error: 1: (int) 1,\n" + + "(spew_test.customError) error: 2: (int) 2,\n" + + "(spew_test.customError) error: 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 00000000..6ab18080 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line. This means the cgo tests are only added (and hence run) when
+// specifically requested. This configuration is used because spew itself
+// does not require cgo to run even though it does handle certain cgo types
+// specially. Rather than forcing all clients to require cgo and an external
+// C compiler just to run the tests, this scheme makes them optional.
+// +build cgo,testcgo
+
+package spew_test
+
+import (
+	"fmt"
+
+	"github.com/davecgh/go-spew/spew/testdata"
+)
+
+func addCgoDumpTests() {
+	// C char pointer.
+	v := testdata.GetCgoCharPointer()
+	nv := testdata.GetCgoNullCharPointer()
+	pv := &v
+	vcAddr := fmt.Sprintf("%p", v)
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "*testdata._Ctype_char"
+	vs := "116"
+	addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
+	addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
+	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
+	addDumpTest(nv, "("+vt+")(<nil>)\n")
+
+	// C char array.
+	v2, v2l, v2c := testdata.GetCgoCharArray()
+	v2Len := fmt.Sprintf("%d", v2l)
+	v2Cap := fmt.Sprintf("%d", v2c)
+	v2t := "[6]testdata._Ctype_char"
+	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
+		"{\n 00000000 74 65 73 74 32 00 " +
+		" |test2.|\n}"
+	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+
+	// C unsigned char array.
+	v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
+	v3Len := fmt.Sprintf("%d", v3l)
+	v3Cap := fmt.Sprintf("%d", v3c)
+	v3t := "[6]testdata._Ctype_unsignedchar"
+	v3t2 := "[6]testdata._Ctype_uchar"
+	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
+		"{\n 00000000 74 65 73 74 33 00 " +
+		" |test3.|\n}"
+	addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
+
+	// C signed char array.
+	v4, v4l, v4c := testdata.GetCgoSignedCharArray()
+	v4Len := fmt.Sprintf("%d", v4l)
+	v4Cap := fmt.Sprintf("%d", v4c)
+	v4t := "[6]testdata._Ctype_schar"
+	v4t2 := "testdata._Ctype_schar"
+	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+		"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
+		") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
+		") 0\n}"
+	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+
+	// C uint8_t array.
+	v5, v5l, v5c := testdata.GetCgoUint8tArray()
+	v5Len := fmt.Sprintf("%d", v5l)
+	v5Cap := fmt.Sprintf("%d", v5c)
+	v5t := "[6]testdata._Ctype_uint8_t"
+	v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
+		"{\n 00000000 74 65 73 74 35 00 " +
+		" |test5.|\n}"
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+
+	// C typedefed unsigned char array.
+	v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
+	v6Len := fmt.Sprintf("%d", v6l)
+	v6Cap := fmt.Sprintf("%d", v6c)
+	v6t := "[6]testdata._Ctype_custom_uchar_t"
+	v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
+		"{\n 00000000 74 65 73 74 36 00 " +
+		" |test6.|\n}"
+	addDumpTest(v6, "("+v6t+") "+v6s+"\n")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
new file mode 100644
index 00000000..52a0971f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2013 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when either cgo is not supported or "-tags testcgo" is not added to the go
+// test command line. This file intentionally does not set up any cgo tests in
+// this scenario.
+// +build !cgo !testcgo
+
+package spew_test
+
+func addCgoDumpTests() {
+	// Don't add any tests for cgo since this file is only compiled when
+	// there should not be any cgo tests.
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go
new file mode 100644
index 00000000..c6ec8c6d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+	"fmt"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+type Flag int
+
+const (
+	flagOne Flag = iota
+	flagTwo
+)
+
+var flagStrings = map[Flag]string{
+	flagOne: "flagOne",
+	flagTwo: "flagTwo",
+}
+
+func (f Flag) String() string {
+	if s, ok := flagStrings[f]; ok {
+		return s
+	}
+	return fmt.Sprintf("Unknown flag (%d)", int(f))
+}
+
+type Bar struct {
+	data uintptr
+}
+
+type Foo struct {
+	unexportedField Bar
+	ExportedField map[interface{}]interface{}
+}
+
+// This example demonstrates how to use Dump to dump variables to stdout.
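+// The "Output:" comment at the bottom is verified by "go test", which runs
+// the example and compares its captured stdout against that comment, so the
+// expected Dump rendering below is checked automatically.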
+func ExampleDump() {
+	// The following package level declarations are assumed for this example:
+	/*
+		type Flag int
+
+		const (
+			flagOne Flag = iota
+			flagTwo
+		)
+
+		var flagStrings = map[Flag]string{
+			flagOne: "flagOne",
+			flagTwo: "flagTwo",
+		}
+
+		func (f Flag) String() string {
+			if s, ok := flagStrings[f]; ok {
+				return s
+			}
+			return fmt.Sprintf("Unknown flag (%d)", int(f))
+		}
+
+		type Bar struct {
+			data uintptr
+		}
+
+		type Foo struct {
+			unexportedField Bar
+			ExportedField map[interface{}]interface{}
+		}
+	*/
+
+	// Set up some sample data structures for the example.
+	bar := Bar{uintptr(0)}
+	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+	f := Flag(5)
+	b := []byte{
+		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+		0x31, 0x32,
+	}
+
+	// Dump!
+	spew.Dump(s1, f, b)
+
+	// Output:
+	// (spew_test.Foo) {
+	//  unexportedField: (spew_test.Bar) {
+	//   data: (uintptr) <nil>
+	//  },
+	//  ExportedField: (map[interface {}]interface {}) (len=1) {
+	//   (string) (len=3) "one": (bool) true
+	//  }
+	// }
+	// (spew_test.Flag) Unknown flag (5)
+	// ([]uint8) (len=34 cap=34) {
+	//  00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+	//  00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+	//  00000020 31 32 |12|
+	// }
+	//
+}
+
+// This example demonstrates how to use Printf to display a variable with a
+// format string and inline formatting.
+func ExamplePrintf() {
+	// Create a double pointer to a uint8.
+	ui8 := uint8(5)
+	pui8 := &ui8
+	ppui8 := &pui8
+
+	// Create a circular data type.
+	type circular struct {
+		ui8 uint8
+		c *circular
+	}
+	c := circular{ui8: 1}
+	c.c = &c
+
+	// Print!
+	spew.Printf("ppui8: %v\n", ppui8)
+	spew.Printf("circular: %v\n", c)
+
+	// Output:
+	// ppui8: <**>5
+	// circular: {1 <*>{1 <*><shown>}}
+}
+
+// This example demonstrates how to use a ConfigState.
+func ExampleConfigState() {
+	// Modify the indent level of the ConfigState only. The global
+	// configuration is not modified.
+	scs := spew.ConfigState{Indent: "\t"}
+
+	// Output using the ConfigState instance.
+	v := map[string]int{"one": 1}
+	scs.Printf("v: %v\n", v)
+	scs.Dump(v)
+
+	// Output:
+	// v: map[one:1]
+	// (map[string]int) (len=1) {
+	// 	(string) (len=3) "one": (int) 1
+	// }
+}
+
+// This example demonstrates how to use ConfigState.Dump to dump variables to
+// stdout.
+func ExampleConfigState_Dump() {
+	// See the top-level Dump example for details on the types used in this
+	// example.
+
+	// Create two ConfigState instances with different indentation.
+	scs := spew.ConfigState{Indent: "\t"}
+	scs2 := spew.ConfigState{Indent: " "}
+
+	// Set up some sample data structures for the example.
+	bar := Bar{uintptr(0)}
+	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+
+	// Dump using the ConfigState instances.
+	scs.Dump(s1)
+	scs2.Dump(s1)
+
+	// Output:
+	// (spew_test.Foo) {
+	// 	unexportedField: (spew_test.Bar) {
+	// 		data: (uintptr) <nil>
+	// 	},
+	// 	ExportedField: (map[interface {}]interface {}) (len=1) {
+	// 		(string) (len=3) "one": (bool) true
+	// 	}
+	// }
+	// (spew_test.Foo) {
+	//  unexportedField: (spew_test.Bar) {
+	//   data: (uintptr) <nil>
+	//  },
+	//  ExportedField: (map[interface {}]interface {}) (len=1) {
+	//   (string) (len=3) "one": (bool) true
+	//  }
+	// }
+	//
+}
+
+// This example demonstrates how to use ConfigState.Printf to display a variable
+// with a format string and inline formatting.
+func ExampleConfigState_Printf() {
+	// See the top-level Dump example for details on the types used in this
+	// example.
+
+	// Create two ConfigState instances and modify the method handling of the
+	// first ConfigState only.
+	scs := spew.NewDefaultConfig()
+	scs2 := spew.NewDefaultConfig()
+	scs.DisableMethods = true
+
+	// Alternatively:
+	// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
+	// scs2 := spew.ConfigState{Indent: " "}
+
+	// This is of type Flag which implements a Stringer and has raw value 1.
+	f := flagTwo
+
+	// Dump using the ConfigState instances.
+	scs.Printf("f: %v\n", f)
+	scs2.Printf("f: %v\n", f)
+
+	// Output:
+	// f: 1
+	// f: flagTwo
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 00000000..c49875ba
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+	value interface{}
+	fs fmt.State
+	depth int
+	pointers map[uintptr]int
+	ignoreNextType bool
+	cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
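+// (For instance, a hypothetical "%-8.2v" call would be replayed as "%-v":
+// supported flags survive while width and precision are dropped.)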
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
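+	// With the '+' flag, a double pointer to the value 5 renders along the
+	// lines of <**>(0xf00->0xf04)5, each dereferenced address joined by "->"
+	// (the addresses here are purely illustrative).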
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+
+	case reflect.Map:
+		// nil maps should be indicated as different from empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
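+
+A minimal sketch of typical use (myVar being any value to inspect):
+
+	fmt.Printf("myVar: %v\n", spew.NewFormatter(myVar))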
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go new file mode 100644 index 00000000..f9b93abe --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -0,0 +1,1558 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. + +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// formatterTest is used to describe a test to be performed against NewFormatter. +type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. 
+func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. 
+ v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. 
+ v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. 
+ v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. 
+ v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2sp := "[stringer 1 stringer 2 stringer 3]" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "[1 2 3]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2sp) + addFormatterTest("%v", &pv2, "<**>"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array containing interfaces. 
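+	// With interface elements, the %#v forms also report each element's
+	// dynamic type, e.g. "[(string)one (int)2 (uint)3]", while plain %v
+	// prints just the values.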
+ v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Slice containing interfaces. 
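+	// Same idea as the array case above, plus a nil element: a nil
+	// interface entry has no dynamic type and spew renders it as "<nil>".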
+ v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. + var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. 
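+	// A nil interface carries neither dynamic type nor value, so spew
+	// falls back to "<nil>" for %v and "(interface {})<nil>" for %#v.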
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. + v := map[string]int{"one": 1, "two": 2} + nilMap := map[string]int(nil) + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nilMap, "("+vt+")"+"") + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nilMap, "("+vt+")"+"") + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. 
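+	// The string-keyed map above lists two acceptable wants (vs and vs2)
+	// because Go map iteration order is unspecified. The pstringer map
+	// below additionally depends on unsafe access: its String method has a
+	// pointer receiver, so in "-tags safe" builds (spew.UnsafeDisabled) the
+	// raw values are expected instead.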
+ v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + if spew.UnsafeDisabled { + v2s = "map[one:1]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. + v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. 
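+	// For structs, %v omits field names ("{127 255}"), %+v includes them
+	// ("{a:127 b:255}"), and the %#v forms add each field's type as well.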
+ type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. + type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. 
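+	// Only the exported field S is reachable as an interface without
+	// unsafe access, so in safe builds the pointer-receiver String method
+	// is expected to fire for S but not for the unexported s; see the
+	// spew.UnsafeDisabled branch below.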
+ type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3sp := v3s + v3s2 := "{s:stringer test S:stringer test2}" + v3s2p := v3s2 + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + v3s3p := v3s3 + if spew.UnsafeDisabled { + v3s = "{test test2}" + v3sp = "{test stringer test2}" + v3s2 = "{s:test S:test2}" + v3s2p = "{s:test S:stringer test2}" + v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" + v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" + } + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3sp) + addFormatterTest("%v", &pv3, "<**>"+v3sp) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. + e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. 
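+	// A zero uintptr points at nothing, so spew renders it as "<nil>"
+	// rather than as a hex address.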
+ v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. 
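+	// Non-nil pointers format as hex addresses, so the expected strings
+	// are derived with the same fmt "%p" verb used to capture them.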
+ i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. + v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. 
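+	// Functions format as their entry-point address; with the %#v forms
+	// the type is the full signature, e.g. "(func(*testing.T))0xaddr".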
+ v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. 
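+	// Whenever a pointer that is already being displayed comes up again,
+	// the formatter stops recursing (upstream marks the spot with
+	// "<shown>"), which keeps the expected strings for these circular
+	// values finite.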
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{<*>{<*>{<*>{<*>}}}}" + v3s2 := "{<*>{<*>{<*>}}}" + v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" + v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + ")}}}" + v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + "){ps2:(*" + v3t2 + ")}}}}" + v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + ")}}}" + v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + + ")(" + tic2Addr + ")}}}}" + v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s2) + addFormatterTest("%v", &pv3, "<**>"+v3s2) + addFormatterTest("%+v", v3, v3s3) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) + addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) +} + +func addPanicFormatterTests() { + // Type that panics in its Stringer interface. 
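+	// spew recovers panics raised by user Stringer and error methods and
+	// embeds the panic value in the output, as in "(PANIC=test panic)127"
+	// expected below.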
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addErrorFormatterTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addPassthroughFormatterTests() { + // %x passthrough with uint. + v := uint(4294967295) + pv := &v + vAddr := fmt.Sprintf("%x", pv) + pvAddr := fmt.Sprintf("%x", &pv) + vs := "ffffffff" + addFormatterTest("%x", v, vs) + addFormatterTest("%x", pv, vAddr) + addFormatterTest("%x", &pv, pvAddr) + + // %#x passthrough with uint. + v2 := int(2147483647) + pv2 := &v2 + v2Addr := fmt.Sprintf("%#x", pv2) + pv2Addr := fmt.Sprintf("%#x", &pv2) + v2s := "0x7fffffff" + addFormatterTest("%#x", v2, v2s) + addFormatterTest("%#x", pv2, v2Addr) + addFormatterTest("%#x", &pv2, pv2Addr) + + // %f passthrough with precision. + addFormatterTest("%.2f", 3.1415, "3.14") + addFormatterTest("%.3f", 3.1415, "3.142") + addFormatterTest("%.4f", 3.1415, "3.1415") + + // %f passthrough with width and precision. + addFormatterTest("%5.2f", 3.1415, " 3.14") + addFormatterTest("%6.3f", 3.1415, " 3.142") + addFormatterTest("%7.4f", 3.1415, " 3.1415") + + // %d passthrough with width. + addFormatterTest("%3d", 127, "127") + addFormatterTest("%4d", 127, " 127") + addFormatterTest("%5d", 127, " 127") + + // %q passthrough with string. + addFormatterTest("%q", "test", "\"test\"") +} + +// TestFormatter executes all of the tests described by formatterTests. +func TestFormatter(t *testing.T) { + // Setup tests. 
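+	// The add*FormatterTests helpers above all append to the shared
+	// formatterTests slice. Each entry is rendered with spew.Fprintf and
+	// accepted if it matches any of its wanted strings; several wants are
+	// listed wherever map ordering makes the output nondeterministic.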
+ addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +type testStruct struct { + x int +} + +func (ts testStruct) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +type testStructP struct { + x int +} + +func (ts *testStructP) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if spew.UnsafeDisabled { + expected = "map[1:1 2:2 3:3]" + } + if s != expected { + t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) + } + + if !spew.UnsafeDisabled { + s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) + } + } + + s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "map[error: 1:1 error: 2:2 error: 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 00000000..20a9cfef --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+// dummyFmtState implements a fake fmt.State to use for testing invalid
+// reflect.Value handling. This is necessary because the fmt package catches
+// invalid values before invoking the formatter on them.
+type dummyFmtState struct {
+	bytes.Buffer
+}
+
+func (dfs *dummyFmtState) Flag(f int) bool {
+	if f == int('+') {
+		return true
+	}
+	return false
+}
+
+func (dfs *dummyFmtState) Precision() (int, bool) {
+	return 0, false
+}
+
+func (dfs *dummyFmtState) Width() (int, bool) {
+	return 0, false
+}
+
+// TestInvalidReflectValue ensures the dump and formatter code handles an
+// invalid reflect value properly. This needs access to internal state since it
+// should never happen in real code and therefore can't be tested via the public
+// API.
+func TestInvalidReflectValue(t *testing.T) {
+	i := 1
+
+	// Dump invalid reflect value.
+	v := new(reflect.Value)
+	buf := new(bytes.Buffer)
+	d := dumpState{w: buf, cs: &Config}
+	d.dump(*v)
+	s := buf.String()
+	want := "<invalid>"
+	if s != want {
+		t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
+	}
+	i++
+
+	// Formatter invalid reflect value.
+	buf2 := new(dummyFmtState)
+	f := formatState{value: *v, cs: &Config, fs: buf2}
+	f.format(*v)
+	s = buf2.String()
+	want = "<invalid>"
+	if s != want {
+		t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
+	}
+}
+
+// SortValues makes the internal sortValues function available to the test
+// package.
+func SortValues(values []reflect.Value, cs *ConfigState) {
+	sortValues(values, cs)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
new file mode 100644
index 00000000..a0c612ec
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+	"unsafe"
+)
+
+// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
+// the maximum kind value which does not exist. This is needed to test the
+// fallback code which punts to the standard fmt library for new types that
+// might get added to the language.
+func changeKind(v *reflect.Value, readOnly bool) {
+	rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
+	*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
+	if readOnly {
+		*rvf |= flagRO
+	} else {
+		*rvf &= ^uintptr(flagRO)
+	}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
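+// It is the helper behind every wrapper above, turning, for example,
+// Println(a, b) into fmt.Println(NewFormatter(a), NewFormatter(b)).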
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go new file mode 100644 index 00000000..b70466c6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// spewFunc is used to identify which public function of the spew package or +// ConfigState a test applies to. +type spewFunc int + +const ( + fCSFdump spewFunc = iota + fCSFprint + fCSFprintf + fCSFprintln + fCSPrint + fCSPrintln + fCSSdump + fCSSprint + fCSSprintf + fCSSprintln + fCSErrorf + fCSNewFormatter + fErrorf + fFprint + fFprintln + fPrint + fPrintln + fSdump + fSprint + fSprintf + fSprintln +) + +// Map of spewFunc values to names for pretty printing. +var spewFuncStrings = map[spewFunc]string{ + fCSFdump: "ConfigState.Fdump", + fCSFprint: "ConfigState.Fprint", + fCSFprintf: "ConfigState.Fprintf", + fCSFprintln: "ConfigState.Fprintln", + fCSSdump: "ConfigState.Sdump", + fCSPrint: "ConfigState.Print", + fCSPrintln: "ConfigState.Println", + fCSSprint: "ConfigState.Sprint", + fCSSprintf: "ConfigState.Sprintf", + fCSSprintln: "ConfigState.Sprintln", + fCSErrorf: "ConfigState.Errorf", + fCSNewFormatter: "ConfigState.NewFormatter", + fErrorf: "spew.Errorf", + fFprint: "spew.Fprint", + fFprintln: "spew.Fprintln", + fPrint: "spew.Print", + fPrintln: "spew.Println", + fSdump: "spew.Sdump", + fSprint: "spew.Sprint", + fSprintf: "spew.Sprintf", + fSprintln: "spew.Sprintln", +} + +func (f spewFunc) String() string { + if s, ok := spewFuncStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) +} + +// spewTest is used to describe a test to be performed against the public +// functions of the spew package or ConfigState. +type spewTest struct { + cs *spew.ConfigState + f spewFunc + format string + in interface{} + want string +} + +// spewTests houses the tests to be performed against the public functions of +// the spew package and ConfigState. +// +// These tests are only intended to ensure the public functions are exercised +// and are intentionally not exhaustive of types. The exhaustive type +// tests are handled in the dump and format tests. +var spewTests []spewTest + +// redirStdout is a helper function to return the standard output from f as a +// byte slice. 
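+// It works by temporarily pointing os.Stdout at a temp file, so tests that
+// use it cannot safely run in parallel.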
+func redirStdout(f func()) ([]byte, error) { + tempFile, err := ioutil.TempFile("", "ss-test") + if err != nil { + return nil, err + } + fileName := tempFile.Name() + defer os.Remove(fileName) // Ignore error + + origStdout := os.Stdout + os.Stdout = tempFile + f() + os.Stdout = origStdout + tempFile.Close() + + return ioutil.ReadFile(fileName) +} + +func initSpewTests() { + // Config states with various settings. + scsDefault := spew.NewDefaultConfig() + scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} + scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} + scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} + scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} + scsNoCap := &spew.ConfigState{DisableCapacities: true} + + // Variables for tests on types which implement Stringer interface with and + // without a pointer receiver. + ts := stringer("test") + tps := pstringer("test") + + type ptrTester struct { + s *struct{} + } + tptr := &ptrTester{s: &struct{}{}} + + // depthTester is used to test max depth handling for structs, array, slices + // and maps. + type depthTester struct { + ic indirCir1 + arr [1]string + slice []string + m map[string]int + } + dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, + map[string]int{"one": 1}} + + // Variable for tests on types which implement error interface. + te := customError(10) + + spewTests = []spewTest{ + {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, + {scsDefault, fCSFprint, "", int16(32767), "32767"}, + {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, + {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, + {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, + {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, + {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, + {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, + {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, + {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, + {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, + {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, + {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, + {scsDefault, fFprint, "", float32(3.14), "3.14"}, + {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, + {scsDefault, fPrint, "", true, "true"}, + {scsDefault, fPrintln, "", false, "false\n"}, + {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, + {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, + {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, + {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, + {scsNoMethods, fCSFprint, "", ts, "test"}, + {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, + {scsNoMethods, fCSFprint, "", tps, "test"}, + {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, + {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, + {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, + {scsNoPmethods, fCSFprint, "", tps, "test"}, + {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, + {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, + {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + + " slice: ([]string) (len=1 cap=1) {\n \n },\n" + + " m: (map[string]int) 
(len=1) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(len=4) (stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 00000000..2cd087a2 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) 
+github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) +github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) 
+github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/k8s.io/apiextensions-apiserver/CONTRIBUTING.md b/vendor/k8s.io/apiextensions-apiserver/CONTRIBUTING.md new file mode 100644 index 00000000..d8ad4d2d --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/apiextensions-apiserver](https://git.k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/vendor/k8s.io/apiextensions-apiserver/LICENSE b/vendor/k8s.io/apiextensions-apiserver/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiextensions-apiserver/OWNERS b/vendor/k8s.io/apiextensions-apiserver/OWNERS new file mode 100644 index 00000000..d9cb557f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/OWNERS @@ -0,0 +1,9 @@ +reviewers: +- deads2k +- sttts +- enisoc +- mbohlool +approvers: +- deads2k +- lavalamp +- sttts diff --git a/vendor/k8s.io/apiextensions-apiserver/README.md b/vendor/k8s.io/apiextensions-apiserver/README.md new file mode 100644 index 00000000..be75b9ba --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/README.md @@ -0,0 +1,20 @@ +# apiextensions-apiserver + +Implements: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/thirdpartyresources.md + +It provides an API for registering `CustomResourceDefinitions`. + +## Purpose + +This API server provides the implementation for `CustomResourceDefinitions` which is included as +delegate server inside of `kube-apiserver`. 
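[Editor's illustration, not part of the vendored README or this patch: a minimal sketch of how a client might register a `CustomResourceDefinition` against this API server. It assumes the generated clientset this repository ships under `pkg/client/clientset/clientset`; the `stable.example.com` group and `CronTab` kind are invented for the example.]

```go
// Sketch only: register a CustomResourceDefinition via the apiextensions
// clientset. Group/kind/names below are illustrative, not from this patch.
package main

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func registerCRD(cfg *rest.Config) error {
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		return err
	}
	crd := &apiextensionsv1beta1.CustomResourceDefinition{
		// Name must be <plural>.<group>.
		ObjectMeta: metav1.ObjectMeta{Name: "crontabs.stable.example.com"},
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group:   "stable.example.com",
			Version: "v1",
			Scope:   apiextensionsv1beta1.NamespaceScoped,
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural:   "crontabs",
				Singular: "crontab",
				Kind:     "CronTab",
			},
		},
	}
	// Create the CRD; once established, CronTab objects are served
	// under /apis/stable.example.com/v1/.
	_, err = cs.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
	return err
}
```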
+ + +## Compatibility + +HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinery, and k8s.io/client-go. + +## Where does it come from? + +`apiextensions-apiserver` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiextensions-apiserver. +Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. diff --git a/vendor/k8s.io/apimachinery/SECURITY_CONTACTS b/vendor/k8s.io/apiextensions-apiserver/SECURITY_CONTACTS similarity index 100% rename from vendor/k8s.io/apimachinery/SECURITY_CONTACTS rename to vendor/k8s.io/apiextensions-apiserver/SECURITY_CONTACTS diff --git a/vendor/k8s.io/apiextensions-apiserver/code-of-conduct.md b/vendor/k8s.io/apiextensions-apiserver/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/apiextensions-apiserver/main.go b/vendor/k8s.io/apiextensions-apiserver/main.go new file mode 100644 index 00000000..09143ab6 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/main.go @@ -0,0 +1,40 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "os" + + "github.com/golang/glog" + + "k8s.io/apiextensions-apiserver/pkg/cmd/server" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/util/logs" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + + stopCh := genericapiserver.SetupSignalHandler() + cmd := server.NewCommandStartCustomResourceDefinitionsServer(os.Stdout, os.Stderr, stopCh) + cmd.Flags().AddGoFlagSet(flag.CommandLine) + if err := cmd.Execute(); err != nil { + glog.Fatal(err) + } +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS b/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS new file mode 100644 index 00000000..fe7b0144 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS @@ -0,0 +1,2 @@ +approvers: +- feature-approvers diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go new file mode 100644 index 00000000..24e72f91 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: @sttts, @nikhita + // alpha: v1.8 + // beta: v1.9 + // + // CustomResourceValidation is a list of validation methods for CustomResources + CustomResourceValidation utilfeature.Feature = "CustomResourceValidation" + + // owner: @sttts, @nikhita + // alpha: v1.10 + // beta: v1.11 + // + // CustomResourceSubresources defines the subresources for CustomResources + CustomResourceSubresources utilfeature.Feature = "CustomResourceSubresources" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD new file mode 100644 index 00000000..785e9d8a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["errors_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "errors.go", + ], + importpath = "k8s.io/apimachinery/pkg/api/errors", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD new file mode 100644 index 00000000..a02a1fb5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -0,0 +1,72 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "meta_test.go", + "multirestmapper_test.go", + "priority_test.go", + "restmapper_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "errors.go", + "firsthit_restmapper.go", + "help.go", + "interfaces.go", + "lazy.go", + "meta.go", + "multirestmapper.go", + "priority.go", + "restmapper.go", + "unstructured.go", + ], + importpath = "k8s.io/apimachinery/pkg/api/meta", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/api/meta/table:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index 42eac3af..5dc9d89e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -23,6 +23,12 @@ import ( "k8s.io/apimachinery/pkg/types" ) +// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. +type VersionInterfaces struct { + runtime.ObjectConvertor + MetadataAccessor +} + type ListMetaAccessor interface { GetListMeta() List } @@ -86,19 +92,28 @@ const ( type RESTScope interface { // Name of the scope Name() RESTScopeName + // ParamName is the optional name of the parameter that should be inserted in the resource url + // If empty, no param will be inserted + ParamName() string + // ArgumentName is the optional name that should be used for the variable holding the value. + ArgumentName() string + // ParamDescription is the optional description to use to document the parameter in api documentation + ParamDescription() string } // RESTMapping contains the information needed to deal with objects of a specific // resource and kind in a RESTful manner. 
type RESTMapping struct { - // Resource is the GroupVersionResource (location) for this endpoint - Resource schema.GroupVersionResource + // Resource is a string representing the name of this resource as a REST client would see it + Resource string - // GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint GroupVersionKind schema.GroupVersionKind // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy Scope RESTScope + + runtime.ObjectConvertor + MetadataAccessor } // RESTMapper allows clients to map resources to kind, and map kind and version diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go index 431a0a63..7f92f39a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -19,25 +19,27 @@ package meta import ( "sync" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) // lazyObject defers loading the mapper and typer until necessary. type lazyObject struct { - loader func() (RESTMapper, error) + loader func() (RESTMapper, runtime.ObjectTyper, error) lock sync.Mutex loaded bool err error mapper RESTMapper + typer runtime.ObjectTyper } // NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by // returning those initialization errors when the interface methods are invoked. This defers the // initialization and any server calls until a client actually needs to perform the action. -func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { +func NewLazyObjectLoader(fn func() (RESTMapper, runtime.ObjectTyper, error)) (RESTMapper, runtime.ObjectTyper) { obj := &lazyObject{loader: fn} - return obj + return obj, obj } // init lazily loads the mapper and typer, returning an error if initialization has failed. @@ -47,12 +49,13 @@ func (o *lazyObject) init() error { if o.loaded { return o.err } - o.mapper, o.err = o.loader() + o.mapper, o.typer, o.err = o.loader() o.loaded = true return o.err } var _ RESTMapper = &lazyObject{} +var _ runtime.ObjectTyper = &lazyObject{} func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { if err := o.init(); err != nil { @@ -102,3 +105,17 @@ func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err } return o.mapper.ResourceSingularizer(resource) } + +func (o *lazyObject) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + if err := o.init(); err != nil { + return nil, false, err + } + return o.typer.ObjectKinds(obj) +} + +func (o *lazyObject) Recognizes(gvk schema.GroupVersionKind) bool { + if err := o.init(); err != nil { + return false + } + return o.typer.Recognizes(gvk) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1c2a83cf..b9670071 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -38,6 +38,7 @@ var errNotCommon = fmt.Errorf("object does not implement the common interface fo // CommonAccessor returns a Common interface for the provided object or an error if the object does // not provide List. +// TODO: return bool instead of error func CommonAccessor(obj interface{}) (metav1.Common, error) { switch t := obj.(type) { case List: @@ -70,6 +71,7 @@ func CommonAccessor(obj interface{}) (metav1.Common, error) { // not provide List. 
// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an // object *is* a List. +// TODO: return bool instead of error func ListAccessor(obj interface{}) (List, error) { switch t := obj.(type) { case List: @@ -99,6 +101,7 @@ var errNotObject = fmt.Errorf("object does not implement the Object interfaces") // obj must be a pointer to an API type. An error is returned if the minimum // required fields are missing. Fields that are not required return the default // value and are a no-op if set. +// TODO: return bool instead of error func Accessor(obj interface{}) (metav1.Object, error) { switch t := obj.(type) { case metav1.Object: diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go index fa11c580..df28e64f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -54,12 +54,12 @@ func (m PriorityRESTMapper) String() string { // ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource) - if originalErr != nil && len(originalGVRs) == 0 { - return schema.GroupVersionResource{}, originalErr + originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource) + if err != nil { + return schema.GroupVersionResource{}, err } if len(originalGVRs) == 1 { - return originalGVRs[0], originalErr + return originalGVRs[0], nil } remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) @@ -77,7 +77,7 @@ func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupV continue case 1: // one match, return - return matchedGVRs[0], originalErr + return matchedGVRs[0], nil default: // more than one match, use the matched hits as the list moving to the next pattern. // this way you can have a series of selection criteria @@ -90,12 +90,12 @@ func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupV // KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource) - if originalErr != nil && len(originalGVKs) == 0 { - return schema.GroupVersionKind{}, originalErr + originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource) + if err != nil { + return schema.GroupVersionKind{}, err } if len(originalGVKs) == 1 { - return originalGVKs[0], originalErr + return originalGVKs[0], nil } remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) @@ -113,7 +113,7 @@ func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersi continue case 1: // one match, return - return matchedGVKs[0], originalErr + return matchedGVKs[0], nil default: // more than one match, use the matched hits as the list moving to the next pattern. 
// this way you can have a series of selection criteria @@ -153,9 +153,9 @@ func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) } func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { - mappings, originalErr := m.Delegate.RESTMappings(gk, versions...) - if originalErr != nil && len(mappings) == 0 { - return nil, originalErr + mappings, err := m.Delegate.RESTMappings(gk, versions...) + if err != nil { + return nil, err } // any versions the user provides take priority @@ -187,7 +187,7 @@ func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) continue case 1: // one match, return - return matching[0], originalErr + return matching[0], nil default: // more than one match, use the matched hits as the list moving to the next pattern. // this way you can have a series of selection criteria @@ -195,7 +195,7 @@ func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) } } if len(remaining) == 1 { - return remaining[0], originalErr + return remaining[0], nil } var kinds []schema.GroupVersionKind diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority_test.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority_test.go index fff1afd1..098d53bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/priority_test.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority_test.go @@ -34,30 +34,6 @@ func TestPriorityRESTMapperResourceForErrorHandling(t *testing.T) { result schema.GroupVersionResource err string }{ - { - name: "error", - delegate: fixedRESTMapper{err: errors.New("delegateError")}, - err: "delegateError", - }, - { - name: "single hit + error", - delegate: fixedRESTMapper{resourcesFor: []schema.GroupVersionResource{{Resource: "single-hit"}}, err: errors.New("delegateError")}, - result: schema.GroupVersionResource{Resource: "single-hit"}, - err: "delegateError", - }, - { - name: "group selection + error", - delegate: fixedRESTMapper{resourcesFor: []schema.GroupVersionResource{ - {Group: "one", Version: "a", Resource: "first"}, - {Group: "two", Version: "b", Resource: "second"}, - }, err: errors.New("delegateError")}, - resourcePatterns: []schema.GroupVersionResource{ - {Group: "one", Version: AnyVersion, Resource: AnyResource}, - }, - result: schema.GroupVersionResource{Group: "one", Version: "a", Resource: "first"}, - err: "delegateError", - }, - { name: "single hit", delegate: fixedRESTMapper{resourcesFor: []schema.GroupVersionResource{{Resource: "single-hit"}}}, @@ -130,10 +106,6 @@ func TestPriorityRESTMapperResourceForErrorHandling(t *testing.T) { if len(tc.err) == 0 && actualErr == nil { continue } - if len(tc.err) == 0 && actualErr != nil { - t.Errorf("%s: unexpected err: %v", tc.name, actualErr) - continue - } if len(tc.err) > 0 && actualErr == nil { t.Errorf("%s: missing expected err: %v", tc.name, tc.err) continue @@ -153,30 +125,6 @@ func TestPriorityRESTMapperKindForErrorHandling(t *testing.T) { result schema.GroupVersionKind err string }{ - { - name: "error", - delegate: fixedRESTMapper{err: errors.New("delegateErr")}, - err: "delegateErr", - }, - { - name: "single hit + error", - delegate: fixedRESTMapper{kindsFor: []schema.GroupVersionKind{{Kind: "single-hit"}}, err: errors.New("delegateErr")}, - result: schema.GroupVersionKind{Kind: "single-hit"}, - err: "delegateErr", - }, - { - name: "group selection + error", - delegate: fixedRESTMapper{kindsFor: []schema.GroupVersionKind{ - {Group: "one", Version: "a", Kind: "first"}, - {Group: 
"two", Version: "b", Kind: "second"}, - }, err: errors.New("delegateErr")}, - kindPatterns: []schema.GroupVersionKind{ - {Group: "one", Version: AnyVersion, Kind: AnyKind}, - }, - result: schema.GroupVersionKind{Group: "one", Version: "a", Kind: "first"}, - err: "delegateErr", - }, - { name: "single hit", delegate: fixedRESTMapper{kindsFor: []schema.GroupVersionKind{{Kind: "single-hit"}}}, @@ -249,10 +197,6 @@ func TestPriorityRESTMapperKindForErrorHandling(t *testing.T) { if len(tc.err) == 0 && actualErr == nil { continue } - if len(tc.err) == 0 && actualErr != nil { - t.Errorf("%s: unexpected err: %v", tc.name, actualErr) - continue - } if len(tc.err) > 0 && actualErr == nil { t.Errorf("%s: missing expected err: %v", tc.name, tc.err) continue @@ -304,13 +248,6 @@ func TestPriorityRESTMapperRESTMapping(t *testing.T) { input: schema.GroupKind{Kind: "Foo"}, err: errors.New("fail on this"), }, - { - name: "result + error", - mapper: PriorityRESTMapper{Delegate: fixedRESTMapper{mappings: []*RESTMapping{mapping1}, err: errors.New("fail on this")}}, - input: schema.GroupKind{Kind: "Foo"}, - result: mapping1, - err: errors.New("fail on this"), - }, { name: "return error for ambiguous", mapper: PriorityRESTMapper{ diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index 41b60d73..ff945acd 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -28,15 +28,30 @@ import ( // Implements RESTScope interface type restScope struct { - name RESTScopeName + name RESTScopeName + paramName string + argumentName string + paramDescription string } func (r *restScope) Name() RESTScopeName { return r.name } +func (r *restScope) ParamName() string { + return r.paramName +} +func (r *restScope) ArgumentName() string { + return r.argumentName +} +func (r *restScope) ParamDescription() string { + return r.paramDescription +} var RESTScopeNamespace = &restScope{ - name: RESTScopeNameNamespace, + name: RESTScopeNameNamespace, + paramName: "namespaces", + argumentName: "namespace", + paramDescription: "object name and auth scope, such as for teams and projects", } var RESTScopeRoot = &restScope{ @@ -62,6 +77,8 @@ type DefaultRESTMapper struct { kindToScope map[schema.GroupVersionKind]RESTScope singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource + + interfacesFunc VersionInterfacesFunc } func (m *DefaultRESTMapper) String() string { @@ -70,12 +87,16 @@ func (m *DefaultRESTMapper) String() string { var _ RESTMapper = &DefaultRESTMapper{} +// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a +// given api version, or an error if no such api version exists. +type VersionInterfacesFunc func(version schema.GroupVersion) (*VersionInterfaces, error) + // NewDefaultRESTMapper initializes a mapping between Kind and APIVersion // to a resource name and back based on the objects in a runtime.Scheme // and the Kubernetes API conventions. Takes a group name, a priority list of the versions // to search when an object has no default version (set empty to return an error), // and a function that retrieves the correct metadata for a given version. 
-func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper { +func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) kindToScope := make(map[schema.GroupVersionKind]RESTScope) @@ -90,6 +111,7 @@ func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRE defaultGroupVersions: defaultGroupVersions, singularToPlural: singularToPlural, pluralToSingular: pluralToSingular, + interfacesFunc: f, } } @@ -504,10 +526,18 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) } + interfaces, err := m.interfacesFunc(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) + } + mappings = append(mappings, &RESTMapping{ - Resource: res, + Resource: res.Resource, GroupVersionKind: gvk, Scope: scope, + + ObjectConvertor: interfaces.ObjectConvertor, + MetadataAccessor: interfaces.MetadataAccessor, }) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper_test.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper_test.go index 853404ad..1b1c70d6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper_test.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper_test.go @@ -17,13 +17,42 @@ limitations under the License. package meta import ( + "errors" "reflect" "strings" "testing" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) +type fakeConvertor struct{} + +func (fakeConvertor) Convert(in, out, context interface{}) error { + return nil +} + +func (fakeConvertor) ConvertToVersion(in runtime.Object, _ runtime.GroupVersioner) (runtime.Object, error) { + return in, nil +} + +func (fakeConvertor) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return label, value, nil +} + +var validAccessor = resourceAccessor{} +var validConvertor = fakeConvertor{} + +func fakeInterfaces(version schema.GroupVersion) (*VersionInterfaces, error) { + return &VersionInterfaces{ObjectConvertor: validConvertor, MetadataAccessor: validAccessor}, nil +} + +var unmatchedErr = errors.New("no version") + +func unmatchedVersionInterfaces(version schema.GroupVersion) (*VersionInterfaces, error) { + return nil, unmatchedErr +} + func TestRESTMapperVersionAndKindForResource(t *testing.T) { testGroup := "test.group" testVersion := "test" @@ -42,7 +71,7 @@ func TestRESTMapperVersionAndKindForResource(t *testing.T) { {Resource: schema.GroupVersionResource{Resource: "internalobjects"}, ExpectedGVK: testGroupVersion.WithKind("InternalObject")}, } for i, testCase := range testCases { - mapper := NewDefaultRESTMapper([]schema.GroupVersion{testGroupVersion}) + mapper := NewDefaultRESTMapper([]schema.GroupVersion{testGroupVersion}, fakeInterfaces) if len(testCase.ExpectedGVK.Kind) != 0 { mapper.Add(testCase.ExpectedGVK, RESTScopeNamespace) } @@ -75,7 +104,7 @@ func TestRESTMapperGroupForResource(t *testing.T) { {Resource: schema.GroupVersionResource{Resource: "myobje"}, Err: true, GroupVersionKind: schema.GroupVersionKind{Group: "testapi", Version: "test", Kind: "MyObject"}}, } for i, testCase := range testCases { - mapper := 
NewDefaultRESTMapper([]schema.GroupVersion{testCase.GroupVersionKind.GroupVersion()}) + mapper := NewDefaultRESTMapper([]schema.GroupVersion{testCase.GroupVersionKind.GroupVersion()}, fakeInterfaces) mapper.Add(testCase.GroupVersionKind, RESTScopeNamespace) actualGVK, err := mapper.KindFor(testCase.Resource) @@ -220,7 +249,7 @@ func TestRESTMapperKindsFor(t *testing.T) { } for _, testCase := range testCases { tcName := testCase.Name - mapper := NewDefaultRESTMapper(testCase.PreferredOrder) + mapper := NewDefaultRESTMapper(testCase.PreferredOrder, fakeInterfaces) for _, kind := range testCase.KindsToRegister { mapper.Add(kind, RESTScopeNamespace) } @@ -397,7 +426,7 @@ func TestRESTMapperResourcesFor(t *testing.T) { tcName := testCase.Name for _, partialResource := range []schema.GroupVersionResource{testCase.PluralPartialResourceToRequest, testCase.SingularPartialResourceToRequest} { - mapper := NewDefaultRESTMapper(testCase.PreferredOrder) + mapper := NewDefaultRESTMapper(testCase.PreferredOrder, fakeInterfaces) for _, kind := range testCase.KindsToRegister { mapper.Add(kind, RESTScopeNamespace) } @@ -482,7 +511,7 @@ func TestRESTMapperResourceSingularizer(t *testing.T) { {Kind: "lowercases", Plural: "lowercaseses", Singular: "lowercases"}, } for i, testCase := range testCases { - mapper := NewDefaultRESTMapper([]schema.GroupVersion{testGroupVersion}) + mapper := NewDefaultRESTMapper([]schema.GroupVersion{testGroupVersion}, fakeInterfaces) // create singular/plural mapping mapper.Add(testGroupVersion.WithKind(testCase.Kind), RESTScopeNamespace) @@ -506,7 +535,7 @@ func TestRESTMapperRESTMapping(t *testing.T) { APIGroupVersions []schema.GroupVersion DefaultVersions []schema.GroupVersion - Resource schema.GroupVersionResource + Resource string ExpectedGroupVersion *schema.GroupVersion Err bool }{ @@ -515,19 +544,19 @@ func TestRESTMapperRESTMapping(t *testing.T) { {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "Unknown", Err: true}, - {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: testGroupVersion.WithResource("internalobjects")}, - {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: testGroupVersion.WithResource("internalobjects")}, + {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, + {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, - {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: testGroupVersion.WithResource("internalobjects")}, + {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, - {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{}, Resource: internalGroupVersion.WithResource("internalobjects"), ExpectedGroupVersion: &schema.GroupVersion{Group: testGroup, Version: "test"}}, + {DefaultVersions: 
[]schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{}, Resource: "internalobjects", ExpectedGroupVersion: &schema.GroupVersion{Group: testGroup, Version: "test"}}, - {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: testGroupVersion.WithResource("internalobjects")}, + {DefaultVersions: []schema.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, // TODO: add test for a resource that exists in one version but not another } for i, testCase := range testCases { - mapper := NewDefaultRESTMapper(testCase.DefaultVersions) + mapper := NewDefaultRESTMapper(testCase.DefaultVersions, fakeInterfaces) mapper.Add(internalGroupVersion.WithKind("InternalObject"), RESTScopeNamespace) preferredVersions := []string{} @@ -548,6 +577,10 @@ func TestRESTMapperRESTMapping(t *testing.T) { t.Errorf("%d: unexpected resource: %#v", i, mapping) } + if mapping.MetadataAccessor == nil || mapping.ObjectConvertor == nil { + t.Errorf("%d: missing codec and accessor: %#v", i, mapping) + } + groupVersion := testCase.ExpectedGroupVersion if groupVersion == nil { groupVersion = &testCase.APIGroupVersions[0] @@ -566,7 +599,7 @@ func TestRESTMapperRESTMappingSelectsVersion(t *testing.T) { internalObjectGK := schema.GroupKind{Group: "tgroup", Kind: "InternalObject"} otherObjectGK := schema.GroupKind{Group: "tgroup", Kind: "OtherObject"} - mapper := NewDefaultRESTMapper([]schema.GroupVersion{expectedGroupVersion1, expectedGroupVersion2}) + mapper := NewDefaultRESTMapper([]schema.GroupVersion{expectedGroupVersion1, expectedGroupVersion2}, fakeInterfaces) mapper.Add(expectedGroupVersion1.WithKind("InternalObject"), RESTScopeNamespace) mapper.Add(expectedGroupVersion2.WithKind("OtherObject"), RESTScopeNamespace) @@ -575,7 +608,7 @@ func TestRESTMapperRESTMappingSelectsVersion(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if mapping.Resource != expectedGroupVersion2.WithResource("otherobjects") || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion2 { + if mapping.Resource != "otherobjects" || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion2 { t.Errorf("unexpected mapping: %#v", mapping) } @@ -583,7 +616,7 @@ func TestRESTMapperRESTMappingSelectsVersion(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if mapping.Resource != expectedGroupVersion1.WithResource("internalobjects") || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion1 { + if mapping.Resource != "internalobjects" || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion1 { t.Errorf("unexpected mapping: %#v", mapping) } @@ -613,7 +646,7 @@ func TestRESTMapperRESTMappingSelectsVersion(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - if mapping.Resource != expectedGroupVersion2.WithResource("otherobjects") || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion2 { + if mapping.Resource != "otherobjects" || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion2 { t.Errorf("unexpected mapping: %#v", mapping) } } @@ -645,7 +678,7 @@ func TestRESTMapperRESTMappings(t *testing.T) { Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "v2"}}, AddGroupVersionKind: []schema.GroupVersionKind{schema.GroupVersion{Group: 
testGroup, Version: "v2"}.WithKind("InternalObject")}, - ExpectedRESTMappings: []*RESTMapping{{Resource: schema.GroupVersionResource{Group: testGroup, Version: "v2", Resource: "internalobjects"}, GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v2", Kind: "InternalObject"}}}, + ExpectedRESTMappings: []*RESTMapping{{Resource: "internalobjects", GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v2", Kind: "InternalObject"}}}, }, // ask for specific versions - only one available - check ExpectedRESTMappings @@ -654,29 +687,20 @@ func TestRESTMapperRESTMappings(t *testing.T) { Kind: "InternalObject", APIGroupVersions: []schema.GroupVersion{{Group: testGroup, Version: "v3"}, {Group: testGroup, Version: "v2"}}, AddGroupVersionKind: []schema.GroupVersionKind{schema.GroupVersion{Group: testGroup, Version: "v2"}.WithKind("InternalObject")}, - ExpectedRESTMappings: []*RESTMapping{{Resource: schema.GroupVersionResource{Group: testGroup, Version: "v2", Resource: "internalobjects"}, GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v2", Kind: "InternalObject"}}}, + ExpectedRESTMappings: []*RESTMapping{{Resource: "internalobjects", GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v2", Kind: "InternalObject"}}}, }, // do not ask for specific version - search through default versions - check ExpectedRESTMappings { - DefaultVersions: []schema.GroupVersion{testGroupVersion, {Group: testGroup, Version: "v2"}}, - Kind: "InternalObject", - AddGroupVersionKind: []schema.GroupVersionKind{schema.GroupVersion{Group: testGroup, Version: "v1"}.WithKind("InternalObject"), schema.GroupVersion{Group: testGroup, Version: "v2"}.WithKind("InternalObject")}, - ExpectedRESTMappings: []*RESTMapping{ - { - Resource: schema.GroupVersionResource{Group: testGroup, Version: "v1", Resource: "internalobjects"}, - GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v1", Kind: "InternalObject"}, - }, - { - Resource: schema.GroupVersionResource{Group: testGroup, Version: "v2", Resource: "internalobjects"}, - GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v2", Kind: "InternalObject"}, - }, - }, + DefaultVersions: []schema.GroupVersion{testGroupVersion, {Group: testGroup, Version: "v2"}}, + Kind: "InternalObject", + AddGroupVersionKind: []schema.GroupVersionKind{schema.GroupVersion{Group: testGroup, Version: "v1"}.WithKind("InternalObject"), schema.GroupVersion{Group: testGroup, Version: "v2"}.WithKind("InternalObject")}, + ExpectedRESTMappings: []*RESTMapping{{Resource: "internalobjects", GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v1", Kind: "InternalObject"}}, {Resource: "internalobjects", GroupVersionKind: schema.GroupVersionKind{Group: testGroup, Version: "v2", Kind: "InternalObject"}}}, }, } for i, testCase := range testCases { - mapper := NewDefaultRESTMapper(testCase.DefaultVersions) + mapper := NewDefaultRESTMapper(testCase.DefaultVersions, fakeInterfaces) for _, gvk := range testCase.AddGroupVersionKind { mapper.Add(gvk, RESTScopeNamespace) } @@ -703,6 +727,9 @@ func TestRESTMapperRESTMappings(t *testing.T) { if mapping.Resource != exp.Resource { t.Errorf("%d - %d: unexpected resource: %#v", i, j, mapping) } + if mapping.MetadataAccessor == nil || mapping.ObjectConvertor == nil { + t.Errorf("%d - %d: missing codec and accessor: %#v", i, j, mapping) + } if mapping.GroupVersionKind != exp.GroupVersionKind { t.Errorf("%d - %d: unexpected GroupVersionKind: %#v", i, j, mapping) } @@ 
-715,9 +742,9 @@ func TestRESTMapperReportsErrorOnBadVersion(t *testing.T) { expectedGroupVersion2 := schema.GroupVersion{Group: "tgroup", Version: "test2"} internalObjectGK := schema.GroupKind{Group: "tgroup", Kind: "InternalObject"} - mapper := NewDefaultRESTMapper([]schema.GroupVersion{expectedGroupVersion1, expectedGroupVersion2}) + mapper := NewDefaultRESTMapper([]schema.GroupVersion{expectedGroupVersion1, expectedGroupVersion2}, unmatchedVersionInterfaces) mapper.Add(expectedGroupVersion1.WithKind("InternalObject"), RESTScopeNamespace) - _, err := mapper.RESTMapping(internalObjectGK, "test3") + _, err := mapper.RESTMapping(internalObjectGK, expectedGroupVersion1.Version) if err == nil { t.Errorf("unexpected non-error") } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go new file mode 100644 index 00000000..4e13efea --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// InterfacesForUnstructuredConversion returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects and supports conversion +// from typed objects (provided by parent) to untyped objects. +func InterfacesForUnstructuredConversion(parent VersionInterfacesFunc) VersionInterfacesFunc { + return func(version schema.GroupVersion) (*VersionInterfaces, error) { + if i, err := parent(version); err == nil { + return &VersionInterfaces{ + ObjectConvertor: i.ObjectConvertor, + MetadataAccessor: NewAccessor(), + }, nil + } + return InterfacesForUnstructured(version) + } +} + +// InterfacesForUnstructured returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects. It will return errors for +// other conversions. 
+func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { + return &VersionInterfaces{ + ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, + MetadataAccessor: NewAccessor(), + }, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD new file mode 100644 index 00000000..57f85fb2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD @@ -0,0 +1,69 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "amount_test.go", + "math_test.go", + "quantity_proto_test.go", + "quantity_test.go", + "scale_int_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/gopkg.in/inf.v0:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "amount.go", + "generated.pb.go", + "math.go", + "quantity.go", + "quantity_proto.go", + "scale_int.go", + "suffix.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apimachinery/pkg/api/resource", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/gopkg.in/inf.v0:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = ["quantity_example_test.go"], + deps = ["//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 083c8225..6de71e50 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 31a46a6d..40185777 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
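A minimal sketch, assuming a hypothetical Widget kind in a hypothetical example.com group, of how the two-argument NewDefaultRESTMapper restored by this patch pairs with InterfacesForUnstructured:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Hypothetical group/version and kind; any CRD-style type works the same way.
	gv := schema.GroupVersion{Group: "example.com", Version: "v1"}

	// In this vendored revision NewDefaultRESTMapper takes a VersionInterfacesFunc
	// as its second argument; InterfacesForUnstructured supplies a converter and
	// metadata accessor suitable for unstructured objects.
	mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, meta.InterfacesForUnstructured)
	mapper.Add(gv.WithKind("Widget"), meta.RESTScopeNamespace)

	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "example.com", Kind: "Widget"})
	if err != nil {
		panic(err)
	}
	// In this revision mapping.Resource is a plain string ("widgets").
	fmt.Println(mapping.Resource, mapping.GroupVersionKind)
}

The resulting RESTMapping carries both the resource string and the MetadataAccessor/ObjectConvertor supplied by the interfaces function, which is what the updated restmapper tests above assert.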
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index c3cd1396..6a8bb997 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + flag "github.com/spf13/pflag" + inf "gopkg.in/inf.v0" ) @@ -745,3 +747,43 @@ func (q *Quantity) Copy() *Quantity { Format: q.Format, } } + +// qFlag is a helper type for the Flag function +type qFlag struct { + dest *Quantity +} + +// Sets the value of the internal Quantity. (used by flag & pflag) +func (qf qFlag) Set(val string) error { + q, err := ParseQuantity(val) + if err != nil { + return err + } + // This copy is OK because q will not be referenced again. + *qf.dest = q + return nil +} + +// Converts the value of the internal Quantity to a string. (used by flag & pflag) +func (qf qFlag) String() string { + return qf.dest.String() +} + +// States the type of flag this is (Quantity). (used by pflag) +func (qf qFlag) Type() string { + return "quantity" +} + +// QuantityFlag is a helper that makes a quantity flag (using standard flag package). +// Will panic if defaultValue is not a valid quantity. +func QuantityFlag(flagName, defaultValue, description string) *Quantity { + q := MustParse(defaultValue) + flag.Var(NewQuantityFlagValue(&q), flagName, description) + return &q +} + +// NewQuantityFlagValue returns an object that can be used to back a flag, +// pointing at the given Quantity variable. +func NewQuantityFlagValue(q *Quantity) flag.Value { + return qFlag{q} +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_test.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_test.go index 915b2444..74f091a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_test.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_test.go @@ -24,6 +24,7 @@ import ( "unicode" fuzz "github.com/google/gofuzz" + "github.com/spf13/pflag" inf "gopkg.in/inf.v0" ) @@ -1058,6 +1059,21 @@ func TestCopy(t *testing.T) { } } +func TestQFlagSet(t *testing.T) { + qf := qFlag{&Quantity{}} + qf.Set("1Ki") + if e, a := "1Ki", qf.String(); e != a { + t.Errorf("Unexpected result %v != %v", e, a) + } +} + +func TestQFlagIsPFlag(t *testing.T) { + var pfv pflag.Value = qFlag{} + if e, a := "quantity", pfv.Type(); e != a { + t.Errorf("Unexpected result %v != %v", e, a) + } +} + func TestSub(t *testing.T) { tests := []struct { a Quantity diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index ab474079..fc36d981 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
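The pflag-backed Quantity helpers vendored above can drive a command-line option directly; a small usage sketch, with the flag set name and the --memory flag purely illustrative:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Start from a default and let the flag overwrite it in place.
	mem := resource.MustParse("64Mi")

	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	// NewQuantityFlagValue satisfies pflag.Value (Set/String/Type via qFlag).
	fs.Var(resource.NewQuantityFlagValue(&mem), "memory", "memory request, e.g. 512Mi or 1Gi")

	if err := fs.Parse([]string{"--memory=1Gi"}); err != nil {
		panic(err)
	}
	fmt.Println(mem.String()) // prints "1Gi"
}

Because qFlag also implements Type(), the same backing value works with the standard flag package (via QuantityFlag) and with pflag.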
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD new file mode 100644 index 00000000..edb9a6ac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD @@ -0,0 +1,59 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "register_test.go", + "roundtrip_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "doc.go", + "register.go", + "types.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go new file mode 100644 index 00000000..1ea8c137 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package internalversion
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *metav1.ListOptions, s conversion.Scope) error {
+	if err := metav1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+		return err
+	}
+	if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+		return err
+	}
+	out.IncludeUninitialized = in.IncludeUninitialized
+	out.ResourceVersion = in.ResourceVersion
+	out.TimeoutSeconds = in.TimeoutSeconds
+	out.Watch = in.Watch
+	out.Limit = in.Limit
+	out.Continue = in.Continue
+	return nil
+}
+
+func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOptions, out *ListOptions, s conversion.Scope) error {
+	if err := metav1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+		return err
+	}
+	if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+		return err
+	}
+	out.IncludeUninitialized = in.IncludeUninitialized
+	out.ResourceVersion = in.ResourceVersion
+	out.TimeoutSeconds = in.TimeoutSeconds
+	out.Watch = in.Watch
+	out.Limit = in.Limit
+	out.Continue = in.Continue
+	return nil
+}
+
+func Convert_map_to_v1_LabelSelector(in *map[string]string, out *metav1.LabelSelector, s conversion.Scope) error {
+	if in == nil {
+		return nil
+	}
+	*out = metav1.LabelSelector{}
+	for labelKey, labelValue := range *in {
+		metav1.AddLabelToSelector(out, labelKey, labelValue)
+	}
+	return nil
+}
+
+func Convert_v1_LabelSelector_to_map(in *metav1.LabelSelector, out *map[string]string, s conversion.Scope) error {
+	var err error
+	*out, err = metav1.LabelSelectorAsMap(in)
+	if err != nil {
+		err = field.Invalid(field.NewPath("labelSelector"), *in, fmt.Sprintf("cannot convert to old selector: %v", err))
+	}
+	return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
new file mode 100644
index 00000000..1e85c5c4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1
+
+package internalversion
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
new file mode 100644
index 00000000..4bde90b3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// Scheme is the registry for any type that adheres to the meta API spec. +var scheme = runtime.NewScheme() + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Codecs provides access to encoding and decoding for the scheme. +var Codecs = serializer.NewCodecFactory(scheme) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// addToGroupVersion registers common meta types into schemas. +func addToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddConversionFuncs( + metav1.Convert_string_To_labels_Selector, + metav1.Convert_labels_Selector_To_string, + + metav1.Convert_string_To_fields_Selector, + metav1.Convert_fields_Selector_To_string, + + Convert_map_to_v1_LabelSelector, + Convert_v1_LabelSelector_to_map, + + Convert_internalversion_ListOptions_To_v1_ListOptions, + Convert_v1_ListOptions_To_internalversion_ListOptions, + ) + // ListOptions is the only options struct which needs conversion (it exposes labels and fields + // as selectors for convenience). The other types have only a single representation today. 
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ListOptions{},
+		&metav1.GetOptions{},
+		&metav1.ExportOptions{},
+		&metav1.DeleteOptions{},
+	)
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&metav1beta1.Table{},
+		&metav1beta1.TableOptions{},
+		&metav1beta1.PartialObjectMetadata{},
+		&metav1beta1.PartialObjectMetadataList{},
+	)
+	scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion,
+		&metav1beta1.Table{},
+		&metav1beta1.TableOptions{},
+		&metav1beta1.PartialObjectMetadata{},
+		&metav1beta1.PartialObjectMetadataList{},
+	)
+	// Allow delete options to be decoded across all versions in this scheme (we may want to be more clever than this)
+	scheme.AddUnversionedTypes(SchemeGroupVersion, &metav1.DeleteOptions{})
+	metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
+	return nil
+}
+
+// Unlike other API groups, meta internal knows about all meta external versions, but keeps
+// the logic for conversion private.
+func init() {
+	if err := addToGroupVersion(scheme, SchemeGroupVersion); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register_test.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register_test.go
new file mode 100644
index 00000000..8116f807
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register_test.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package internalversion + +import ( + "net/url" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" +) + +func TestListOptions(t *testing.T) { + // verify round trip conversion + ten := int64(10) + in := &metav1.ListOptions{ + LabelSelector: "a=1", + FieldSelector: "b=1", + ResourceVersion: "10", + TimeoutSeconds: &ten, + Watch: true, + } + out := &ListOptions{} + if err := scheme.Convert(in, out, nil); err != nil { + t.Fatal(err) + } + actual := &metav1.ListOptions{} + if err := scheme.Convert(out, actual, nil); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(in, actual) { + t.Errorf("unexpected: %s", diff.ObjectReflectDiff(in, actual)) + } + + // verify failing conversion + for i, failingObject := range []*metav1.ListOptions{ + {LabelSelector: "a!!!"}, + {FieldSelector: "a!!!"}, + } { + out = &ListOptions{} + if err := scheme.Convert(failingObject, out, nil); err == nil { + t.Errorf("%d: unexpected conversion: %#v", i, out) + } + } + + // verify kind registration + if gvks, unversioned, err := scheme.ObjectKinds(in); err != nil || unversioned || gvks[0] != metav1.SchemeGroupVersion.WithKind("ListOptions") { + t.Errorf("unexpected: %v %v %v", gvks[0], unversioned, err) + } + if gvks, unversioned, err := scheme.ObjectKinds(out); err != nil || unversioned || gvks[0] != SchemeGroupVersion.WithKind("ListOptions") { + t.Errorf("unexpected: %v %v %v", gvks[0], unversioned, err) + } + + actual = &metav1.ListOptions{} + if err := ParameterCodec.DecodeParameters(url.Values{"watch": []string{"1"}}, metav1.SchemeGroupVersion, actual); err != nil { + t.Fatal(err) + } + if !actual.Watch { + t.Errorf("unexpected watch decode: %#v", actual) + } + + // check ParameterCodec + query, err := ParameterCodec.EncodeParameters(in, metav1.SchemeGroupVersion) + if err != nil { + t.Fatal(err) + } + actual = &metav1.ListOptions{} + if err := ParameterCodec.DecodeParameters(query, metav1.SchemeGroupVersion, actual); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(in, actual) { + t.Errorf("unexpected: %s", diff.ObjectReflectDiff(in, actual)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_test.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/roundtrip_test.go similarity index 62% rename from vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_test.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/roundtrip_test.go index cbcbbcef..725aa316 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_test.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/roundtrip_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package unstructured +package internalversion import ( "testing" - "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/testing/roundtrip" + "k8s.io/apimachinery/pkg/apis/meta/fuzzer" ) -func TestNilUnstructuredContent(t *testing.T) { - var u Unstructured - uCopy := u.DeepCopy() - content := u.UnstructuredContent() - expContent := make(map[string]interface{}) - assert.EqualValues(t, expContent, content) - assert.Equal(t, uCopy, &u) +func TestRoundTrip(t *testing.T) { + roundtrip.RoundTripTestForScheme(t, scheme, fuzzer.Funcs) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go new file mode 100644 index 00000000..2f6740d3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. + Continue string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. 
+type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go new file mode 100644 index 00000000..c3fd40a9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -0,0 +1,118 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package internalversion + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_internalversion_List_To_v1_List, + Convert_v1_List_To_internalversion_List, + Convert_internalversion_ListOptions_To_v1_ListOptions, + Convert_v1_ListOptions_To_internalversion_ListOptions, + ) +} + +func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_internalversion_List_To_v1_List is an autogenerated conversion function. +func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + return autoConvert_internalversion_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_internalversion_List is an autogenerated conversion function. 
+func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + return autoConvert_v1_List_To_internalversion_List(in, out, s) +} + +func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go new file mode 100644 index 00000000..e4e5b017 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package internalversion + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil + } else { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LabelSelector == nil { + out.LabelSelector = nil + } else { + out.LabelSelector = in.LabelSelector.DeepCopySelector() + } + if in.FieldSelector == nil { + out.FieldSelector = nil + } else { + out.FieldSelector = in.FieldSelector.DeepCopySelector() + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD new file mode 100644 index 00000000..186db187 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -0,0 +1,101 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "controller_ref_test.go", + "duration_test.go", + "group_version_test.go", + "helpers_test.go", + "labels_test.go", + "micro_time_test.go", + "time_test.go", + "types_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/json-iterator/go:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "controller_ref.go", + "conversion.go", + "doc.go", + "duration.go", + "generated.pb.go", + "group_version.go", + "helpers.go", + "labels.go", + "meta.go", + "micro_time.go", + "micro_time_proto.go", + "register.go", + "time.go", + "time_proto.go", + "types.go", + "types_swagger_doc_generated.go", + "watch.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:all-srcs", + ], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_xtest", + srcs = ["conversion_test.go"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index c36fc655..cd651bcd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -75,8 +75,6 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_unversioned_LabelSelector_to_map, Convert_Slice_string_To_Slice_int32, - - Convert_Slice_string_To_v1_DeletionPropagation, ) } @@ -306,13 +304,3 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio } return nil } - -// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy -func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { - if len(*input) > 0 { - *out = DeletionPropagation((*input)[0]) - } else { - *out = "" - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go index 4ff57fd8..bc591584 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go @@ -47,38 +47,3 @@ func TestMapToLabelSelectorRoundTrip(t *testing.T) { } } } - -func TestConvertSliceStringToDeletionPropagation(t *testing.T) { - tcs := []struct { - Input []string - Output v1.DeletionPropagation - }{ - { - Input: nil, - Output: "", - }, - { - Input: []string{}, - Output: "", - }, - { - Input: []string{"foo"}, - Output: "foo", - }, - { - Input: []string{"bar", "foo"}, - Output: "bar", - }, - } - - for _, tc := range tcs { - var dp v1.DeletionPropagation - if err := v1.Convert_Slice_string_To_v1_DeletionPropagation(&tc.Input, &dp, nil); err != nil { - t.Errorf("Convert_Slice_string_To_v1_DeletionPropagation(%#v): %v", tc.Input, err) - continue - } - if !apiequality.Semantic.DeepEqual(dp, tc.Output) { - t.Errorf("slice string to DeletionPropagation conversion failed: got %v; want %v", dp, tc.Output) - } - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index 2eaabf07..fea458df 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -31,10 +31,7 @@ type Duration struct { // UnmarshalJSON implements the json.Unmarshaller interface. 
func (d *Duration) UnmarshalJSON(b []byte) error { var str string - err := json.Unmarshal(b, &str) - if err != nil { - return err - } + json.Unmarshal(b, &str) pd, err := time.ParseDuration(str) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index febace50..1fa478f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 4baf44f3..bd5abcb7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,7 +49,6 @@ message APIGroup { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - // +optional repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 6f6c5111..7e5bc2d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -104,10 +104,7 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { } var str string - err := json.Unmarshal(b, &str) - if err != nil { - return err - } + json.Unmarshal(b, &str) pt, err := time.Parse(RFC3339Micro, str) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index efff656e..5041954f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -106,10 +106,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { } var str string - err := json.Unmarshal(b, &str) - if err != nil { - return err - } + json.Unmarshal(b, &str) pt, err := time.Parse(time.RFC3339, str) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index e93df184..917efb37 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -799,8 +799,7 @@ type APIGroup struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. 
- // +optional - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } // ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index f91d8a81..caf929ee 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ package v1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +// AUTO-GENERATED FUNCTIONS START HERE var map_APIGroup = map[string]string{ "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "name": "name is the name of the group.", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD new file mode 100644 index 00000000..22c1acee --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD @@ -0,0 +1,53 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "helpers_test.go", + "unstructured_list_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "helpers.go", + "unstructured.go", + "unstructured_list.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index fc138e75..08705ac8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -18,6 +18,7 @@ package unstructured import ( gojson "encoding/json" + "errors" "fmt" "io" "strings" @@ -33,17 +34,14 @@ 
import ( // Returns false if the value is missing. // No error is returned for a nil field. func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { - val, found, err := NestedFieldNoCopy(obj, fields...) + val, found, err := nestedFieldNoCopy(obj, fields...) if !found || err != nil { return nil, found, err } return runtime.DeepCopyJSONValue(val), true, nil } -// NestedFieldNoCopy returns a reference to a nested field. -// Returns false if value is not found and an error if unable -// to traverse obj. -func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { +func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj for i, field := range fields { @@ -62,7 +60,7 @@ func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{ // NestedString returns the string value of a nested field. // Returns false if value is not found and an error if not a string. func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) { - val, found, err := NestedFieldNoCopy(obj, fields...) + val, found, err := nestedFieldNoCopy(obj, fields...) if !found || err != nil { return "", found, err } @@ -76,7 +74,7 @@ func NestedString(obj map[string]interface{}, fields ...string) (string, bool, e // NestedBool returns the bool value of a nested field. // Returns false if value is not found and an error if not a bool. func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) { - val, found, err := NestedFieldNoCopy(obj, fields...) + val, found, err := nestedFieldNoCopy(obj, fields...) if !found || err != nil { return false, found, err } @@ -90,7 +88,7 @@ func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error // NestedFloat64 returns the float64 value of a nested field. // Returns false if value is not found and an error if not a float64. func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) { - val, found, err := NestedFieldNoCopy(obj, fields...) + val, found, err := nestedFieldNoCopy(obj, fields...) if !found || err != nil { return 0, found, err } @@ -104,7 +102,7 @@ func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, // NestedInt64 returns the int64 value of a nested field. // Returns false if value is not found and an error if not an int64. func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) { - val, found, err := NestedFieldNoCopy(obj, fields...) + val, found, err := nestedFieldNoCopy(obj, fields...) if !found || err != nil { return 0, found, err } @@ -118,7 +116,7 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err // NestedStringSlice returns a copy of []string value of a nested field. // Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice. func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) { - val, found, err := NestedFieldNoCopy(obj, fields...) + val, found, err := nestedFieldNoCopy(obj, fields...) if !found || err != nil { return nil, found, err } @@ -140,7 +138,7 @@ func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, // NestedSlice returns a deep copy of []interface{} value of a nested field. // Returns false if value is not found and an error if not a []interface{}. 
 func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) {
-	val, found, err := NestedFieldNoCopy(obj, fields...)
+	val, found, err := nestedFieldNoCopy(obj, fields...)
 	if !found || err != nil {
 		return nil, found, err
 	}
@@ -182,7 +180,7 @@ func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interfa
 // nestedMapNoCopy returns a map[string]interface{} value of a nested field.
 // Returns false if value is not found and an error if not a map[string]interface{}.
 func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
-	val, found, err := NestedFieldNoCopy(obj, fields...)
+	val, found, err := nestedFieldNoCopy(obj, fields...)
 	if !found || err != nil {
 		return nil, found, err
 	}
@@ -435,17 +433,43 @@ func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList
 	return nil
 }
 
-type JSONFallbackEncoder struct {
-	runtime.Encoder
+// UnstructuredObjectConverter is an ObjectConverter for use with
+// Unstructured objects. Since it has no schema or type information,
+// it will only succeed for no-op conversions. This is provided as a
+// sane implementation for APIs that require an object converter.
+type UnstructuredObjectConverter struct{}
+
+func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error {
+	unstructIn, ok := in.(*Unstructured)
+	if !ok {
+		return fmt.Errorf("input type %T is not valid for unstructured conversion", in)
+	}
+
+	unstructOut, ok := out.(*Unstructured)
+	if !ok {
+		return fmt.Errorf("output type %T is not valid for unstructured conversion", out)
+	}
+
+	// maybe deep copy the map? It is documented in the
+	// ObjectConverter interface that this function is not
+	// guaranteed to not mutate the input. Or maybe set the input
+	// object to nil.
+	unstructOut.Object = unstructIn.Object
+	return nil
 }
 
-func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
-	err := c.Encoder.Encode(obj, w)
-	if runtime.IsNotRegisteredError(err) {
-		switch obj.(type) {
-		case *Unstructured, *UnstructuredList:
-			return UnstructuredJSONScheme.Encode(obj, w)
+func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) {
+	if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() {
+		gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind})
+		if !ok {
+			// TODO: should this be a typed error?
+ return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) } + in.GetObjectKind().SetGroupVersionKind(gvk) } - return err + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go index d9799621..9e774d1c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers_test.go @@ -58,79 +58,3 @@ func TestRemoveNestedField(t *testing.T) { RemoveNestedField(obj, "x") // Remove of a non-existent field assert.Empty(t, obj) } - -func TestNestedFieldNoCopy(t *testing.T) { - target := map[string]interface{}{"foo": "bar"} - - obj := map[string]interface{}{ - "a": map[string]interface{}{ - "b": target, - "c": nil, - "d": []interface{}{"foo"}, - }, - } - - // case 1: field exists and is non-nil - res, exists, err := NestedFieldNoCopy(obj, "a", "b") - assert.True(t, exists) - assert.Nil(t, err) - assert.Equal(t, target, res) - target["foo"] = "baz" - assert.Equal(t, target["foo"], res.(map[string]interface{})["foo"], "result should be a reference to the expected item") - - // case 2: field exists and is nil - res, exists, err = NestedFieldNoCopy(obj, "a", "c") - assert.True(t, exists) - assert.Nil(t, err) - assert.Nil(t, res) - - // case 3: error traversing obj - res, exists, err = NestedFieldNoCopy(obj, "a", "d", "foo") - assert.False(t, exists) - assert.NotNil(t, err) - assert.Nil(t, res) - - // case 4: field does not exist - res, exists, err = NestedFieldNoCopy(obj, "a", "e") - assert.False(t, exists) - assert.Nil(t, err) - assert.Nil(t, res) -} - -func TestNestedFieldCopy(t *testing.T) { - target := map[string]interface{}{"foo": "bar"} - - obj := map[string]interface{}{ - "a": map[string]interface{}{ - "b": target, - "c": nil, - "d": []interface{}{"foo"}, - }, - } - - // case 1: field exists and is non-nil - res, exists, err := NestedFieldCopy(obj, "a", "b") - assert.True(t, exists) - assert.Nil(t, err) - assert.Equal(t, target, res) - target["foo"] = "baz" - assert.NotEqual(t, target["foo"], res.(map[string]interface{})["foo"], "result should be a copy of the expected item") - - // case 2: field exists and is nil - res, exists, err = NestedFieldCopy(obj, "a", "c") - assert.True(t, exists) - assert.Nil(t, err) - assert.Nil(t, res) - - // case 3: error traversing obj - res, exists, err = NestedFieldCopy(obj, "a", "d", "foo") - assert.False(t, exists) - assert.NotNil(t, err) - assert.Nil(t, res) - - // case 4: field does not exist - res, exists, err = NestedFieldCopy(obj, "a", "e") - assert.False(t, exists) - assert.Nil(t, err) - assert.Nil(t, res) -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 548a01e5..2a133304 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -58,26 +58,6 @@ func (obj *Unstructured) IsList() bool { _, ok = field.([]interface{}) return ok } -func (obj *Unstructured) ToList() (*UnstructuredList, error) { - if !obj.IsList() { - // return an empty list back - return &UnstructuredList{Object: obj.Object}, 
nil - } - - ret := &UnstructuredList{} - ret.Object = obj.Object - - err := obj.EachListItem(func(item runtime.Object) error { - castItem := item.(*Unstructured) - ret.Items = append(ret.Items, *castItem) - return nil - }) - if err != nil { - return nil, err - } - - return ret, nil -} func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { field, ok := obj.Object["items"] @@ -102,7 +82,7 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { - return make(map[string]interface{}) + obj.Object = make(map[string]interface{}) } return obj.Object } @@ -158,7 +138,7 @@ func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { } func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { - field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences") + field, found, err := nestedFieldNoCopy(u.Object, "metadata", "ownerReferences") if !found || err != nil { return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index bf3fd023..57d78a09 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -53,18 +53,19 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { } // UnstructuredContent returns a map contain an overlay of the Items field onto -// the Object field. Items always overwrites overlay. +// the Object field. Items always overwrites overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { - out := make(map[string]interface{}, len(u.Object)+1) - - // shallow copy every property - for k, v := range u.Object { - out[k] = v + out := u.Object + if out == nil { + out = make(map[string]interface{}) } - items := make([]interface{}, len(u.Items)) for i, item := range u.Items { - items[i] = item.UnstructuredContent() + items[i] = item.Object } out["items"] = items return out diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 9a9f25e8..1c185139 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 98dfea09..73308d86 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go index cce2e603..40d9ab00 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD new file mode 100644 index 00000000..05f33c5b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "deepcopy.go", + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 3b2bedd9..2dd440bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -16,8 +16,6 @@ limitations under the License. 
package v1beta1 -import "k8s.io/apimachinery/pkg/runtime" - func (in *TableRow) DeepCopy() *TableRow { if in == nil { return nil @@ -28,7 +26,7 @@ func (in *TableRow) DeepCopy() *TableRow { if in.Cells != nil { out.Cells = make([]interface{}, len(in.Cells)) for i := range in.Cells { - out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + out.Cells[i] = deepCopyJSON(in.Cells[i]) } } @@ -42,3 +40,22 @@ func (in *TableRow) DeepCopy() *TableRow { in.Object.DeepCopyInto(&out.Object) return out } + +func deepCopyJSON(x interface{}) interface{} { + switch x := x.(type) { + case map[string]interface{}: + clone := make(map[string]interface{}, len(x)) + for k, v := range x { + clone[k] = deepCopyJSON(v) + } + return clone + case []interface{}: + clone := make([]interface{}, len(x)) + for i := range x { + clone[i] = deepCopyJSON(x[i]) + } + return clone + default: + return x + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index 4e427b3b..dda05bea 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index 472902ad..a9060bf9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go index 344c533e..7b7c47d8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -66,8 +66,8 @@ type TableColumnDefinition struct { // TableRow is an individual row in a table. // +protobuf=false type TableRow struct { - // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple - // maps, or lists, or null. See the type field of the column definition for a more detailed description. + // cells will be as wide as headers and may contain strings, numbers, booleans, simple maps, or lists, or + // null. See the type field of the column definition for a more detailed description. Cells []interface{} `json:"cells"` // conditions describe additional status of a row that are relevant for a human user. // +optional diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index 7394535d..2680fbf7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -26,7 +26,7 @@ package v1beta1 // // Those methods can be generated by using hack/update-generated-swagger-docs.sh -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +// AUTO-GENERATED FUNCTIONS START HERE var map_PartialObjectMetadata = map[string]string{ "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -80,7 +80,7 @@ func (TableOptions) SwaggerDoc() map[string]string { var map_TableRow = map[string]string{ "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", + "cells": "cells will be as wide as headers and may contain strings, numbers, booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", "conditions": "conditions describe additional status of a row that are relevant for a human user.", "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 2e79a131..226995a2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go index 73e63fc1..544e569d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
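Aside on the deepCopyJSON helper introduced in v1beta1/deepcopy.go above: it is a local, import-free stand-in for runtime.DeepCopyJSONValue that recursively clones maps and slices and returns scalars unchanged, which is all TableRow.Cells needs. A minimal standalone sketch (hypothetical package main, not part of this patch) showing that the clone is independent of the original:

package main

import "fmt"

// deepCopyJSON clones JSON-compatible values: maps and slices are copied
// recursively; scalars (string, bool, int64, float64, nil) are returned as-is.
func deepCopyJSON(x interface{}) interface{} {
	switch x := x.(type) {
	case map[string]interface{}:
		clone := make(map[string]interface{}, len(x))
		for k, v := range x {
			clone[k] = deepCopyJSON(v)
		}
		return clone
	case []interface{}:
		clone := make([]interface{}, len(x))
		for i := range x {
			clone[i] = deepCopyJSON(x[i])
		}
		return clone
	default:
		return x
	}
}

func main() {
	orig := map[string]interface{}{"cells": []interface{}{"name", int64(3)}}
	clone := deepCopyJSON(orig).(map[string]interface{})
	clone["cells"].([]interface{})[1] = int64(42)
	// Mutating the clone leaves the original untouched.
	fmt.Println(orig["cells"], clone["cells"]) // [name 3] [name 42]
}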
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD new file mode 100644 index 00000000..0d2cee72 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD @@ -0,0 +1,49 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "converter_test.go", + "helper_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "converter.go", + "deep_equal.go", + "doc.go", + "helper.go", + ], + importpath = "k8s.io/apimachinery/pkg/conversion", + deps = ["//vendor/k8s.io/apimachinery/third_party/forked/golang/reflect:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/conversion/queryparams:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD new file mode 100644 index 00000000..81bacef7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD @@ -0,0 +1,39 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "convert.go", + "doc.go", + ], + importpath = "k8s.io/apimachinery/pkg/conversion/queryparams", +) + +go_test( + name = "go_default_xtest", + srcs = ["convert_test.go"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index b3804aa4..17b36661 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -174,9 +174,6 @@ func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { kind = ft.Kind() if !field.IsNil() { field = reflect.Indirect(field) - // If the field is non-nil, it should be added to params - // and the omitempty should be overwite to false - omitempty = false } } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert_test.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert_test.go index 1eed5e1f..b075debf 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert_test.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert_test.go @@ -70,7 +70,6 @@ type childStructs struct { Follow bool `json:"follow,omitempty"` Previous bool `json:"previous,omitempty"` SinceSeconds *int64 `json:"sinceSeconds,omitempty"` - TailLines *int64 
`json:"tailLines,omitempty"` SinceTime *metav1.Time `json:"sinceTime,omitempty"` EmptyTime *metav1.Time `json:"emptyTime"` NonPointerTime metav1.Time `json:"nonPointerTime"` @@ -100,7 +99,6 @@ func validateResult(t *testing.T, input interface{}, actual, expected url.Values func TestConvert(t *testing.T) { sinceSeconds := int64(123) - tailLines := int64(0) sinceTime := metav1.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC) tests := []struct { @@ -184,7 +182,6 @@ func TestConvert(t *testing.T) { Follow: true, Previous: true, SinceSeconds: &sinceSeconds, - TailLines: nil, SinceTime: &sinceTime, // test a custom marshaller EmptyTime: nil, // test a nil custom marshaller without omitempty NonPointerTime: sinceTime, @@ -197,11 +194,10 @@ func TestConvert(t *testing.T) { Follow: true, Previous: true, SinceSeconds: &sinceSeconds, - TailLines: &tailLines, SinceTime: nil, // test a nil custom marshaller with omitempty NonPointerTime: sinceTime, }, - expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "tailLines": {"0"}, "emptyTime": {""}, "nonPointerTime": {"2000-01-01T12:34:56Z"}}, + expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "emptyTime": {""}, "nonPointerTime": {"2000-01-01T12:34:56Z"}}, }, } diff --git a/vendor/k8s.io/apimachinery/pkg/fields/BUILD b/vendor/k8s.io/apimachinery/pkg/fields/BUILD new file mode 100644 index 00000000..383448e0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/BUILD @@ -0,0 +1,41 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "fields_test.go", + "selector_test.go", + ], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "fields.go", + "requirements.go", + "selector.go", + ], + importpath = "k8s.io/apimachinery/pkg/fields", + deps = ["//vendor/k8s.io/apimachinery/pkg/selection:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go index e3e4453b..3785d8c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -55,21 +55,6 @@ type Selector interface { DeepCopySelector() Selector } -type nothingSelector struct{} - -func (n nothingSelector) Matches(_ Fields) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Requirements() Requirements { return nil } -func (n nothingSelector) DeepCopySelector() Selector { return n } -func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false } -func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil } - -// Nothing returns a selector that matches no fields -func Nothing() Selector { - return nothingSelector{} -} - // Everything returns a selector that matches all fields. 
func Everything() Selector { return andTerm{} @@ -464,12 +449,6 @@ func OneTermEqualSelector(k, v string) Selector { return &hasTerm{field: k, value: v} } -// OneTermNotEqualSelector returns an object that matches objects where one field/field does not equal one value. -// Cannot return an error. -func OneTermNotEqualSelector(k, v string) Selector { - return ¬HasTerm{field: k, value: v} -} - // AndSelectors creates a selector that is the logical AND of all the given selectors func AndSelectors(selectors ...Selector) Selector { return andTerm(selectors) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/BUILD b/vendor/k8s.io/apimachinery/pkg/labels/BUILD new file mode 100644 index 00000000..a78764f7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/BUILD @@ -0,0 +1,50 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "labels_test.go", + "selector_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "labels.go", + "selector.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apimachinery/pkg/labels", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 4d482947..a536f9ec 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
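The selector.go hunks above drop the Nothing() and OneTermNotEqualSelector() constructors; Everything(), OneTermEqualSelector() and AndSelectors() (all visible in the surrounding context) remain. A short usage sketch against the retained API (hypothetical package main, not part of this patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// AND two equality terms together.
	sel := fields.AndSelectors(
		fields.OneTermEqualSelector("metadata.name", "node-1"),
		fields.OneTermEqualSelector("spec.unschedulable", "false"),
	)

	// fields.Set satisfies the Fields interface, so it can be matched directly.
	matched := sel.Matches(fields.Set{
		"metadata.name":      "node-1",
		"spec.unschedulable": "false",
	})
	fmt.Println(matched)      // true
	fmt.Println(sel.String()) // something like: metadata.name=node-1,spec.unschedulable=false
}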
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD new file mode 100644 index 00000000..7a53fbc4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD @@ -0,0 +1,98 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["swagger_doc_generator_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "codec.go", + "codec_check.go", + "conversion.go", + "converter.go", + "doc.go", + "embedded.go", + "error.go", + "extension.go", + "generated.pb.go", + "helper.go", + "interfaces.go", + "register.go", + "scheme.go", + "scheme_builder.go", + "swagger_doc_generator.go", + "types.go", + "types_proto.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apimachinery/pkg/runtime", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "conversion_test.go", + "converter_test.go", + "embedded_test.go", + "extension_test.go", + "scheme_test.go", + ], + deps = [ + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/testing:all-srcs", + ], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 291d7a4e..f6f7c10d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -73,6 +73,7 @@ var ( mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) stringType = reflect.TypeOf(string("")) int64Type = 
reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) float64Type = reflect.TypeOf(float64(0)) boolType = reflect.TypeOf(bool(false)) fieldCache = newFieldsCache() @@ -410,7 +411,8 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte var u map[string]interface{} var err error if unstr, ok := obj.(Unstructured); ok { - u = unstr.UnstructuredContent() + // UnstructuredContent() mutates the object so we need to make a copy first + u = unstr.DeepCopyObject().(Unstructured).UnstructuredContent() } else { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) @@ -437,32 +439,22 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte } // DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains -// types produced by json.Unmarshal() and also int64. -// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +// types produced by json.Unmarshal(). func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { return DeepCopyJSONValue(x).(map[string]interface{}) } // DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains -// types produced by json.Unmarshal() and also int64. -// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +// types produced by json.Unmarshal(). func DeepCopyJSONValue(x interface{}) interface{} { switch x := x.(type) { case map[string]interface{}: - if x == nil { - // Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil - return x - } clone := make(map[string]interface{}, len(x)) for k, v := range x { clone[k] = DeepCopyJSONValue(v) } return clone case []interface{}: - if x == nil { - // Typed nil - an interface{} that contains a type []interface{} with a value of nil - return x - } clone := make([]interface{}, len(x)) for i, v := range x { clone[i] = DeepCopyJSONValue(v) @@ -592,14 +584,10 @@ func toUnstructured(sv, dv reflect.Value) error { dv.Set(reflect.ValueOf(sv.Int())) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - uVal := sv.Uint() - if uVal > math.MaxInt64 { - return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal) - } if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.New(int64Type)) + dv.Set(reflect.New(uint64Type)) } - dv.Set(reflect.ValueOf(int64(uVal))) + dv.Set(reflect.ValueOf(sv.Uint())) return nil case reflect.Float32, reflect.Float64: if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 77879660..86b24840 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -41,18 +41,10 @@ func NewNotRegisteredErrForTarget(t reflect.Type, target GroupVersioner) error { return ¬RegisteredErr{t: t, target: target} } -func NewNotRegisteredGVKErrForTarget(gvk schema.GroupVersionKind, target GroupVersioner) error { - return ¬RegisteredErr{gvk: gvk, target: target} -} - func (k *notRegisteredErr) Error() string { if k.t != nil && k.target != nil { return fmt.Sprintf("%v is not suitable for converting to %q", k.t, k.target) } - nullGVK := schema.GroupVersionKind{} - if k.gvk != nullGVK && k.target != nil { - return fmt.Sprintf("%q is not suitable for converting to %q", k.gvk.GroupVersion(), k.target) - } if k.t != nil 
{ return fmt.Sprintf("no kind is registered for the type %v", k.t) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 9bcbd722..f561fd47 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 2ff38391..02e388e9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index ba48e614..9d00f165 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -174,16 +174,13 @@ type ObjectVersioner interface { // ObjectConvertor converts an object to a different version. type ObjectConvertor interface { - // Convert attempts to convert one object into another, or returns an error. This - // method does not mutate the in object, but the in and out object might share data structures, - // i.e. the out object cannot be mutated without mutating the in object as well. - // The context argument will be passed to all nested conversions. + // Convert attempts to convert one object into another, or returns an error. This method does + // not guarantee the in object is not mutated. The context argument will be passed to + // all nested conversions. Convert(in, out, context interface{}) error // ConvertToVersion takes the provided object and converts it the provided version. This - // method does not mutate the in object, but the in and out object might share data structures, - // i.e. the out object cannot be mutated without mutating the in object as well. - // This method is similar to Convert() but handles specific details of choosing the correct - // output version. + // method does not guarantee that the in object is not mutated. This method is similar to + // Convert() but handles specific details of choosing the correct output version. ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) ConvertFieldLabel(version, kind, label, value string) (string, string, error) } @@ -237,9 +234,9 @@ type Object interface { // to JSON allowed. type Unstructured interface { Object - // UnstructuredContent returns a non-nil map with this object's contents. Values may be + // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to - // and from JSON. SetUnstructuredContent should be used to mutate the contents. + // and from JSON. UnstructuredContent() map[string]interface{} // SetUnstructuredContent updates the object content to match the provided map. 
SetUnstructuredContent(map[string]interface{}) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/local_scheme_test.go b/vendor/k8s.io/apimachinery/pkg/runtime/local_scheme_test.go deleted file mode 100644 index 45a0dde5..00000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/local_scheme_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "testing" - - "reflect" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/diff" -) - -func TestPreferredVersionsAllGroups(t *testing.T) { - tests := []struct { - name string - versionPriority map[string][]string - observedVersions []schema.GroupVersion - expectedPrioritized map[string][]schema.GroupVersion - expectedPreferred map[schema.GroupVersion]bool - }{ - { - name: "observedOnly", - observedVersions: []schema.GroupVersion{ - {Group: "", Version: "v3"}, - {Group: "foo", Version: "v1"}, - {Group: "foo", Version: "v2"}, - {Group: "", Version: "v1"}, - }, - expectedPrioritized: map[string][]schema.GroupVersion{ - "": { - {Group: "", Version: "v3"}, - {Group: "", Version: "v1"}, - }, - "foo": { - {Group: "foo", Version: "v1"}, - {Group: "foo", Version: "v2"}, - }, - }, - expectedPreferred: map[schema.GroupVersion]bool{ - {Group: "", Version: "v3"}: true, - {Group: "foo", Version: "v1"}: true, - }, - }, - { - name: "specifiedOnly", - versionPriority: map[string][]string{ - "": {"v3", "v1"}, - "foo": {"v1", "v2"}, - }, - expectedPrioritized: map[string][]schema.GroupVersion{ - "": { - {Group: "", Version: "v3"}, - {Group: "", Version: "v1"}, - }, - "foo": { - {Group: "foo", Version: "v1"}, - {Group: "foo", Version: "v2"}, - }, - }, - expectedPreferred: map[schema.GroupVersion]bool{ - {Group: "", Version: "v3"}: true, - {Group: "foo", Version: "v1"}: true, - }, - }, - { - name: "both", - versionPriority: map[string][]string{ - "": {"v3", "v1"}, - "foo": {"v1", "v2"}, - }, - observedVersions: []schema.GroupVersion{ - {Group: "", Version: "v1"}, - {Group: "", Version: "v3"}, - {Group: "", Version: "v4"}, - {Group: "", Version: "v5"}, - {Group: "bar", Version: "v1"}, - {Group: "bar", Version: "v2"}, - }, - expectedPrioritized: map[string][]schema.GroupVersion{ - "": { - {Group: "", Version: "v3"}, - {Group: "", Version: "v1"}, - {Group: "", Version: "v4"}, - {Group: "", Version: "v5"}, - }, - "foo": { - {Group: "foo", Version: "v1"}, - {Group: "foo", Version: "v2"}, - }, - "bar": { - {Group: "bar", Version: "v1"}, - {Group: "bar", Version: "v2"}, - }, - }, - expectedPreferred: map[schema.GroupVersion]bool{ - {Group: "", Version: "v3"}: true, - {Group: "foo", Version: "v1"}: true, - {Group: "bar", Version: "v1"}: true, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - scheme := NewScheme() - scheme.versionPriority = test.versionPriority - scheme.observedVersions = test.observedVersions - - for group, expected := range test.expectedPrioritized { - actual := scheme.PrioritizedVersionsForGroup(group) - if 
!reflect.DeepEqual(expected, actual) { - t.Error(diff.ObjectDiff(expected, actual)) - } - } - - prioritizedAll := scheme.PrioritizedVersionsAllGroups() - actualPrioritizedAll := map[string][]schema.GroupVersion{} - for _, actual := range prioritizedAll { - actualPrioritizedAll[actual.Group] = append(actualPrioritizedAll[actual.Group], actual) - } - if !reflect.DeepEqual(test.expectedPrioritized, actualPrioritizedAll) { - t.Error(diff.ObjectDiff(test.expectedPrioritized, actualPrioritizedAll)) - } - - preferredAll := scheme.PreferredVersionAllGroups() - actualPreferredAll := map[schema.GroupVersion]bool{} - for _, actual := range preferredAll { - actualPreferredAll[actual] = true - } - if !reflect.DeepEqual(test.expectedPreferred, actualPreferredAll) { - t.Error(diff.ObjectDiff(test.expectedPreferred, actualPreferredAll)) - } - }) - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD new file mode 100644 index 00000000..e8197721 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["group_version_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "generated.pb.go", + "group_version.go", + "interfaces.go", + ], + importpath = "k8s.io/apimachinery/pkg/runtime/schema", + deps = ["//vendor/github.com/gogo/protobuf/proto:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 46c85366..5357628a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index 8655f481..50c2f2a6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
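On the converter.go change above: after this sync, toUnstructured stores unsigned integer fields as uint64 instead of converting them to int64 (the math.MaxInt64 overflow check is gone), and the matching json-iterator hook in serializer/json further below tries ParseUint before ParseInt so such values round-trip. A hedged sketch of the observable difference via runtime.DefaultUnstructuredConverter (assuming this vendored revision exposes it, as upstream does at this vintage; Widget is a hypothetical type, and the printed concrete type assumes exactly this revision):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// Widget is a hypothetical type used only to exercise the converter.
type Widget struct {
	Count uint64 `json:"count"`
}

func main() {
	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&Widget{Count: 5})
	if err != nil {
		panic(err)
	}
	// With this revision the value is stored as uint64; the replaced code
	// converted it to int64 and rejected values above math.MaxInt64.
	fmt.Printf("%T %v\n", u["count"], u["count"]) // uint64 5
}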
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version_test.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version_test.go index 3934bc45..51f26df7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version_test.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version_test.go @@ -134,47 +134,3 @@ func TestKindForGroupVersionKinds(t *testing.T) { } } } - -func TestParseKindArg(t *testing.T) { - tests := []struct { - input string - gvk *GroupVersionKind - gk GroupKind - }{ - {input: "Pod", gk: GroupKind{Kind: "Pod"}}, - {input: ".apps", gk: GroupKind{Group: "apps"}}, - {input: "Pod.", gk: GroupKind{Kind: "Pod"}}, - {input: "StatefulSet.apps", gk: GroupKind{Group: "apps", Kind: "StatefulSet"}}, - {input: "StatefulSet.v1.apps", gvk: &GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}, gk: GroupKind{Group: "v1.apps", Kind: "StatefulSet"}}, - } - for i, test := range tests { - t.Run(test.input, func(t *testing.T) { - gvk, gk := ParseKindArg(test.input) - if (gvk != nil && test.gvk == nil) || (gvk == nil && test.gvk != nil) || (test.gvk != nil && *gvk != *test.gvk) { - t.Errorf("%d: expected output: %#v, got: %#v", i, test.gvk, gvk) - } - if gk != test.gk { - t.Errorf("%d: expected output: %#v, got: %#v", i, test.gk, gk) - } - }) - } -} - -func TestParseGroupKind(t *testing.T) { - tests := []struct { - input string - out GroupKind - }{ - {input: "Pod", out: GroupKind{Kind: "Pod"}}, - {input: ".StatefulSet", out: GroupKind{Group: "StatefulSet"}}, - {input: "StatefulSet.apps", out: GroupKind{Group: "apps", Kind: "StatefulSet"}}, - } - for i, test := range tests { - t.Run(test.input, func(t *testing.T) { - out := ParseGroupKind(test.input) - if out != test.out { - t.Errorf("%d: expected output: %#v, got: %#v", i, test.out, out) - } - }) - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 59163d77..3d94a304 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -21,11 +21,8 @@ import ( "net/url" "reflect" - "strings" - "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" ) // Scheme defines methods for serializing and deserializing API objects, a type @@ -71,13 +68,6 @@ type Scheme struct { // converter stores all registered conversion functions. It also has // default coverting behavior. converter *conversion.Converter - - // versionPriority is a map of groups to ordered lists of versions for those groups indicating the - // default priorities of these versions as registered in the scheme - versionPriority map[string][]string - - // observedVersions keeps track of the order we've seen versions during type registration - observedVersions []schema.GroupVersion } // Function to convert a field selector to internal representation. 
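This scheme.go sync removes the version-priority bookkeeping (the versionPriority and observedVersions fields here, and SetVersionPriority, PrioritizedVersionsForGroup and friends in the hunks below). Basic type registration and lookup are unaffected; a minimal sketch of the surviving API (the example.dev group is hypothetical, and metav1.Status is used only as a convenient runtime.Object):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"} // hypothetical group

	// Register a type; the struct name ("Status") becomes the Kind.
	scheme.AddKnownTypes(gv, &metav1.Status{})

	// Look the registration back up.
	gvks, unversioned, err := scheme.ObjectKinds(&metav1.Status{})
	fmt.Println(gvks, unversioned, err) // [example.dev/v1, Kind=Status] false <nil>
}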
@@ -92,7 +82,6 @@ func NewScheme() *Scheme { unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, - versionPriority: map[string][]string{}, } s.converter = conversion.NewConverter(s.nameFunc) @@ -122,7 +111,7 @@ func (s *Scheme) nameFunc(t reflect.Type) string { for _, gvk := range gvks { internalGV := gvk.GroupVersion() - internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in + internalGV.Version = "__internal" // this is hacky and maybe should be passed in internalGVK := internalGV.WithKind(gvk.Kind) if internalType, exists := s.gvkToType[internalGVK]; exists { @@ -152,7 +141,6 @@ func (s *Scheme) Converter() *conversion.Converter { // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into // every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { - s.addObservedVersion(version) s.AddKnownTypes(version, types...) for _, obj := range types { t := reflect.TypeOf(obj).Elem() @@ -170,7 +158,6 @@ func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Objec // the struct becomes the "kind" field when encoding. Version may not be empty - use the // APIVersionInternal constant if you have a type that does not have a formal version. func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { - s.addObservedVersion(gv) for _, obj := range types { t := reflect.TypeOf(obj) if t.Kind() != reflect.Ptr { @@ -186,7 +173,6 @@ func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { // your structs. Version may not be empty - use the APIVersionInternal constant if you have a // type that does not have a formal version. func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { - s.addObservedVersion(gvk.GroupVersion()) t := reflect.TypeOf(obj) if len(gvk.Version) == 0 { panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) @@ -634,133 +620,3 @@ func setTargetKind(obj Object, kind schema.GroupVersionKind) { } obj.GetObjectKind().SetGroupVersionKind(kind) } - -// SetVersionPriority allows specifying a precise order of priority. 
All specified versions must be in the same group, -// and the specified order overwrites any previously specified order for this group -func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error { - groups := sets.String{} - order := []string{} - for _, version := range versions { - if len(version.Version) == 0 || version.Version == APIVersionInternal { - return fmt.Errorf("internal versions cannot be prioritized: %v", version) - } - - groups.Insert(version.Group) - order = append(order, version.Version) - } - if len(groups) != 1 { - return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", ")) - } - - s.versionPriority[groups.List()[0]] = order - return nil -} - -// PrioritizedVersionsForGroup returns versions for a single group in priority order -func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion { - ret := []schema.GroupVersion{} - for _, version := range s.versionPriority[group] { - ret = append(ret, schema.GroupVersion{Group: group, Version: version}) - } - for _, observedVersion := range s.observedVersions { - if observedVersion.Group != group { - continue - } - found := false - for _, existing := range ret { - if existing == observedVersion { - found = true - break - } - } - if !found { - ret = append(ret, observedVersion) - } - } - - return ret -} - -// PrioritizedVersionsAllGroups returns all known versions in their priority order. Groups are random, but -// versions for a single group are prioritized -func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for group, versions := range s.versionPriority { - for _, version := range versions { - ret = append(ret, schema.GroupVersion{Group: group, Version: version}) - } - } - for _, observedVersion := range s.observedVersions { - found := false - for _, existing := range ret { - if existing == observedVersion { - found = true - break - } - } - if !found { - ret = append(ret, observedVersion) - } - } - return ret -} - -// PreferredVersionAllGroups returns the most preferred version for every group. -// group ordering is random. 
-func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for group, versions := range s.versionPriority { - for _, version := range versions { - ret = append(ret, schema.GroupVersion{Group: group, Version: version}) - break - } - } - for _, observedVersion := range s.observedVersions { - found := false - for _, existing := range ret { - if existing.Group == observedVersion.Group { - found = true - break - } - } - if !found { - ret = append(ret, observedVersion) - } - } - - return ret -} - -// IsGroupRegistered returns true if types for the group have been registered with the scheme -func (s *Scheme) IsGroupRegistered(group string) bool { - for _, observedVersion := range s.observedVersions { - if observedVersion.Group == group { - return true - } - } - return false -} - -// IsVersionRegistered returns true if types for the version have been registered with the scheme -func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool { - for _, observedVersion := range s.observedVersions { - if observedVersion == version { - return true - } - } - - return false -} - -func (s *Scheme) addObservedVersion(version schema.GroupVersion) { - if len(version.Version) == 0 || version.Version == APIVersionInternal { - return - } - for _, observedVersion := range s.observedVersions { - if observedVersion == version { - return - } - } - - s.observedVersions = append(s.observedVersions, version) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD new file mode 100644 index 00000000..bc4cf8ec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD @@ -0,0 +1,64 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["codec_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/testing:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "codec_factory.go", + "negotiated_codec.go", + "protobuf_extension.go", + ], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:all-srcs", + 
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/yaml:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD new file mode 100644 index 00000000..7be13ed4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD @@ -0,0 +1,55 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["meta_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "json.go", + "meta.go", + ], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/json-iterator/go:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = ["json_test.go"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index f7738d11..2b795b5b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -75,6 +75,11 @@ func init() { case jsoniter.NumberValue: var number json.Number iter.ReadVal(&number) + u64, err := strconv.ParseUint(string(number), 10, 64) + if err == nil { + *(*interface{})(ptr) = u64 + return + } i64, err := strconv.ParseInt(string(number), 10, 64) if err == nil { *(*interface{})(ptr) = i64 diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD new file mode 100644 index 00000000..3eb91d86 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD @@ -0,0 +1,35 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "protobuf.go", + ], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD new file mode 100644 index 00000000..de54abb3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["recognizer.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/testing:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/sparse_test.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/sparse_test.go deleted file mode 100644 index 3bc9d13b..00000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/sparse_test.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package serializer - -import ( - "testing" - - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/diff" -) - -type FakeV1Obj struct { - metav1.TypeMeta - metav1.ObjectMeta -} - -func (*FakeV1Obj) DeepCopyObject() runtime.Object { - panic("not supported") -} - -type FakeV2DifferentObj struct { - metav1.TypeMeta - metav1.ObjectMeta -} - -func (*FakeV2DifferentObj) DeepCopyObject() runtime.Object { - panic("not supported") -} -func TestSparse(t *testing.T) { - v1 := schema.GroupVersion{Group: "mygroup", Version: "v1"} - v2 := schema.GroupVersion{Group: "mygroup", Version: "v2"} - - scheme := runtime.NewScheme() - scheme.AddKnownTypes(v1, &FakeV1Obj{}) - scheme.AddKnownTypes(v2, &FakeV2DifferentObj{}) - codecs := NewCodecFactory(scheme) - - srcObj1 := &FakeV1Obj{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} - srcObj2 := &FakeV2DifferentObj{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} - - encoder := codecs.LegacyCodec(v2, v1) - decoder := codecs.UniversalDecoder(v2, v1) - - srcObj1Bytes, err := runtime.Encode(encoder, srcObj1) - if err != nil { - t.Fatal(err) - } - t.Log(string(srcObj1Bytes)) - srcObj2Bytes, err := runtime.Encode(encoder, srcObj2) - if err != nil { - t.Fatal(err) - } - t.Log(string(srcObj2Bytes)) - - uncastDstObj1, err := runtime.Decode(decoder, srcObj1Bytes) - if err != nil { - t.Fatal(err) - } - uncastDstObj2, err := runtime.Decode(decoder, srcObj2Bytes) - if err != nil { - t.Fatal(err) - } - - // clear typemeta - uncastDstObj1.(*FakeV1Obj).TypeMeta = metav1.TypeMeta{} - uncastDstObj2.(*FakeV2DifferentObj).TypeMeta = metav1.TypeMeta{} - - if !equality.Semantic.DeepEqual(srcObj1, uncastDstObj1) { - t.Fatal(diff.ObjectDiff(srcObj1, uncastDstObj1)) - } - if !equality.Semantic.DeepEqual(srcObj2, uncastDstObj2) { - t.Fatal(diff.ObjectDiff(srcObj2, uncastDstObj2)) - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD new file mode 100644 index 00000000..e0589fef --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD @@ -0,0 +1,41 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["streaming_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["streaming.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/streaming", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD new file mode 100644 index 00000000..32e6863c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD @@ -0,0 +1,41 @@ 
+package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["versioning_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["versioning.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 7716cc42..b717fe8f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -19,7 +19,6 @@ package versioning import ( "io" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -167,27 +166,9 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru // Encode ensures the provided object is output in the appropriate group and version, invoking // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. func (c *codec) Encode(obj runtime.Object, w io.Writer) error { - switch obj := obj.(type) { - case *runtime.Unknown: + switch obj.(type) { + case *runtime.Unknown, runtime.Unstructured: return c.encoder.Encode(obj, w) - case runtime.Unstructured: - // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just - // because the top-level type matches our desired destination type. actually send the object to the converter - // to give it a chance to convert the list items if needed. 
- if _, ok := obj.(*unstructured.UnstructuredList); !ok { - // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl) - objGVK := obj.GetObjectKind().GroupVersionKind() - if len(objGVK.Version) == 0 { - return c.encoder.Encode(obj, w) - } - targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK}) - if !ok { - return runtime.NewNotRegisteredGVKErrForTarget(objGVK, c.encodeVersion) - } - if targetGVK == objGVK { - return c.encoder.Encode(obj, w) - } - } } gvks, isUnversioned, err := c.typer.ObjectKinds(obj) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go index f79b2a7c..43c24265 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go @@ -129,25 +129,25 @@ func TestDecode(t *testing.T) { }{ { serializer: &mockSerializer{actual: gvk1}, - convertor: &checkConvertor{groupVersion: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}}, + convertor: &checkConvertor{groupVersion: schema.GroupVersion{Group: "other", Version: "__internal"}}, expectedGVK: gvk1, - decodes: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}, + decodes: schema.GroupVersion{Group: "other", Version: "__internal"}, }, { serializer: &mockSerializer{actual: gvk1, obj: decodable1}, - convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: "__internal"}}, expectedGVK: gvk1, sameObject: decodable2, - decodes: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}, + decodes: schema.GroupVersion{Group: "other", Version: "__internal"}, }, // defaultGVK.Group is allowed to force a conversion to the destination group { serializer: &mockSerializer{actual: gvk1, obj: decodable1}, defaultGVK: &schema.GroupVersionKind{Group: "force"}, - convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "force", Version: runtime.APIVersionInternal}}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "force", Version: "__internal"}}, expectedGVK: gvk1, sameObject: decodable2, - decodes: schema.GroupVersion{Group: "force", Version: runtime.APIVersionInternal}, + decodes: schema.GroupVersion{Group: "force", Version: "__internal"}, }, // uses direct conversion for into when objects differ { @@ -184,10 +184,10 @@ func TestDecode(t *testing.T) { into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, serializer: &mockSerializer{actual: gvk1, obj: decodable1}, - convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: "__internal"}}, expectedGVK: gvk1, expectedObject: &runtime.VersionedObjects{Objects: []runtime.Object{decodable1, decodable2}}, - decodes: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}, + decodes: schema.GroupVersion{Group: "other", Version: "__internal"}, }, // decode into 
the same version as the serialized object diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 167de610..ba89cd23 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/selection/BUILD b/vendor/k8s.io/apimachinery/pkg/selection/BUILD new file mode 100644 index 00000000..3790df9a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/selection/BUILD @@ -0,0 +1,25 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["operator.go"], + importpath = "k8s.io/apimachinery/pkg/selection", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/BUILD b/vendor/k8s.io/apimachinery/pkg/types/BUILD new file mode 100644 index 00000000..3db635c8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/BUILD @@ -0,0 +1,31 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "namespacedname.go", + "nodename.go", + "patch.go", + "uid.go", + ], + importpath = "k8s.io/apimachinery/pkg/types", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 88f0de36..1e2130da 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -18,6 +18,7 @@ package types import ( "fmt" + "strings" ) // NamespacedName comprises a resource name, with a mandatory namespace, @@ -41,3 +42,19 @@ const ( func (n NamespacedName) String() string { return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) } + +// NewNamespacedNameFromString parses the provided string and returns a NamespacedName. +// The expected format is as per String() above. +// If the input string is invalid, the returned NamespacedName has all empty string field values. +// This allows a single-value return from this function, while still allowing error checks in the caller. 
+// Note that an input string which does not include exactly one Separator is not a valid input (as it could never +// have been returned by String()) +func NewNamespacedNameFromString(s string) NamespacedName { + nn := NamespacedName{} + result := strings.Split(s, string(Separator)) + if len(result) == 2 { + nn.Namespace = result[0] + nn.Name = result[1] + } + return nn +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD b/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD new file mode 100644 index 00000000..a9f6be47 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["clock_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["clock.go"], + importpath = "k8s.io/apimachinery/pkg/util/clock", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 9567f900..c303a212 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -26,12 +26,18 @@ import ( type Clock interface { Now() time.Time Since(time.Time) time.Duration - After(time.Duration) <-chan time.Time - NewTimer(time.Duration) Timer - Sleep(time.Duration) - NewTicker(time.Duration) Ticker + After(d time.Duration) <-chan time.Time + NewTimer(d time.Duration) Timer + Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time } +var ( + _ = Clock(RealClock{}) + _ = Clock(&FakeClock{}) + _ = Clock(&IntervalClock{}) +) + // RealClock really calls time.Now() type RealClock struct{} @@ -56,10 +62,8 @@ func (RealClock) NewTimer(d time.Duration) Timer { } } -func (RealClock) NewTicker(d time.Duration) Ticker { - return &realTicker{ - ticker: time.NewTicker(d), - } +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) } func (RealClock) Sleep(d time.Duration) { @@ -133,7 +137,7 @@ func (f *FakeClock) NewTimer(d time.Duration) Timer { return timer } -func (f *FakeClock) NewTicker(d time.Duration) Ticker { +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { f.lock.Lock() defer f.lock.Unlock() tickTime := f.time.Add(d) @@ -145,9 +149,7 @@ func (f *FakeClock) NewTicker(d time.Duration) Ticker { destChan: ch, }) - return &fakeTicker{ - c: ch, - } + return ch } // Move clock by Duration, notify anyone that's called After, Tick, or NewTimer @@ -240,8 +242,8 @@ func (*IntervalClock) NewTimer(d time.Duration) Timer { // Unimplemented, will panic. // TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTicker(d time.Duration) Ticker { - panic("IntervalClock doesn't implement NewTicker") +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") } func (*IntervalClock) Sleep(d time.Duration) { @@ -256,6 +258,11 @@ type Timer interface { Reset(d time.Duration) bool } +var ( + _ = Timer(&realTimer{}) + _ = Timer(&fakeTimer{}) +) + // realTimer is backed by an actual time.Timer.
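As an aside on the NewNamespacedNameFromString helper added above: a minimal, illustrative round-trip sketch, not part of the vendored diff; it assumes only the k8s.io/apimachinery/pkg/types package that this patch vendors.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	nn := types.NamespacedName{Namespace: "kube-system", Name: "coredns"}
	s := nn.String() // "kube-system/coredns"

	// A string produced by String() parses back to the same value.
	fmt.Println(types.NewNamespacedNameFromString(s) == nn) // true

	// Anything without exactly one '/' yields the zero value, so callers
	// can error-check with a comparison instead of a second return value.
	zero := types.NewNamespacedNameFromString("no-separator-here")
	fmt.Println(zero == types.NamespacedName{}) // true
}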
type realTimer struct { timer *time.Timer @@ -318,31 +325,3 @@ func (f *fakeTimer) Reset(d time.Duration) bool { return active } - -type Ticker interface { - C() <-chan time.Time - Stop() -} - -type realTicker struct { - ticker *time.Ticker -} - -func (t *realTicker) C() <-chan time.Time { - return t.ticker.C -} - -func (t *realTicker) Stop() { - t.ticker.Stop() -} - -type fakeTicker struct { - c <-chan time.Time -} - -func (t *fakeTicker) C() <-chan time.Time { - return t.c -} - -func (t *fakeTicker) Stop() { -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock_test.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock_test.go index c7b371fc..27d34605 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock_test.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock_test.go @@ -21,18 +21,6 @@ import ( "time" ) -var ( - _ = Clock(RealClock{}) - _ = Clock(&FakeClock{}) - _ = Clock(&IntervalClock{}) - - _ = Timer(&realTimer{}) - _ = Timer(&fakeTimer{}) - - _ = Ticker(&realTicker{}) - _ = Ticker(&fakeTicker{}) -) - func TestFakeClock(t *testing.T) { startTime := time.Now() tc := NewFakeClock(startTime) @@ -122,13 +110,13 @@ func TestFakeTick(t *testing.T) { if tc.HasWaiters() { t.Errorf("unexpected waiter?") } - oneSec := tc.NewTicker(time.Second).C() + oneSec := tc.Tick(time.Second) if !tc.HasWaiters() { t.Errorf("unexpected lack of waiter?") } - oneOhOneSec := tc.NewTicker(time.Second + time.Millisecond).C() - twoSec := tc.NewTicker(2 * time.Second).C() + oneOhOneSec := tc.Tick(time.Second + time.Millisecond) + twoSec := tc.Tick(2 * time.Second) select { case <-oneSec: t.Errorf("unexpected channel read") diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD new file mode 100644 index 00000000..fa2b74a2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD @@ -0,0 +1,35 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["errors_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "errors.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/errors", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD b/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD new file mode 100644 index 00000000..3a323b11 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["framer_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["framer.go"], + importpath = "k8s.io/apimachinery/pkg/util/framer", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD new file mode 100644 index 00000000..73e62bcd --- /dev/null +++ 
b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["intstr_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "generated.pb.go", + "intstr.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/intstr", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 5c2ac4f2..161e9a6f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index 1c3ec732..6819d468 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/BUILD b/vendor/k8s.io/apimachinery/pkg/util/json/BUILD new file mode 100644 index 00000000..c42821fc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/json/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["json.go"], + importpath = "k8s.io/apimachinery/pkg/util/json", +) + +go_test( + name = "go_default_test", + srcs = ["json_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD new file mode 100644 index 00000000..0ddf97d1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD @@ -0,0 +1,39 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "util.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/mergepatch", + deps = [ + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 00000000..8e8d9fce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 00000000..16501d5a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBadJSONDoc = errors.New("invalid JSON document") + ErrNoListOfLists = errors.New("lists of lists are not supported") + ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") + ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") + ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") + ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") +) + +func ErrNoMergeKey(m map[string]interface{}, k string) error { + return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) +} + +func ErrBadArgType(expected, actual interface{}) error { + return fmt.Errorf("expected a %s, but received a %s", + reflect.TypeOf(expected), + reflect.TypeOf(actual)) +} + +func ErrBadArgKind(expected, actual interface{}) error { + var expectedKindString, actualKindString string + if expected == nil { + expectedKindString = "nil" + } else { + expectedKindString = reflect.TypeOf(expected).Kind().String() + } + if actual == nil { + actualKindString = "nil" + } else { + actualKindString = reflect.TypeOf(actual).Kind().String() + } + return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) +} + +func ErrBadPatchType(t interface{}, m map[string]interface{}) error { + return fmt.Errorf("unknown patch type: %s in map: %v", t, m) +} + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 00000000..9261290a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). +func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// Make sure the unmarshaling of left and right is consistent (e.g. use the same library). +func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go new file mode 100644 index 00000000..1b37e3ef --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mergepatch + +import ( + "fmt" + "testing" +) + +func TestHasConflicts(t *testing.T) { + testCases := []struct { + A interface{} + B interface{} + Ret bool + }{ + {A: "hello", B: "hello", Ret: false}, + {A: "hello", B: "hell", Ret: true}, + {A: "hello", B: nil, Ret: true}, + {A: "hello", B: 1, Ret: true}, + {A: "hello", B: float64(1.0), Ret: true}, + {A: "hello", B: false, Ret: true}, + {A: 1, B: 1, Ret: false}, + {A: nil, B: nil, Ret: false}, + {A: false, B: false, Ret: false}, + {A: float64(3), B: float64(3), Ret: false}, + + {A: "hello", B: []interface{}{}, Ret: true}, + {A: []interface{}{1}, B: []interface{}{}, Ret: true}, + {A: []interface{}{}, B: []interface{}{}, Ret: false}, + {A: []interface{}{1}, B: []interface{}{1}, Ret: false}, + {A: map[string]interface{}{}, B: []interface{}{1}, Ret: true}, + + {A: map[string]interface{}{}, B: map[string]interface{}{"a": 1}, Ret: false}, + {A: map[string]interface{}{"a": 1}, B: map[string]interface{}{"a": 1}, Ret: false}, + {A: map[string]interface{}{"a": 1}, B: map[string]interface{}{"a": 2}, Ret: true}, + {A: map[string]interface{}{"a": 1}, B: map[string]interface{}{"b": 2}, Ret: false}, + + { + A: map[string]interface{}{"a": []interface{}{1}}, + B: map[string]interface{}{"a": []interface{}{1}}, + Ret: false, + }, + { + A: map[string]interface{}{"a": []interface{}{1}}, + B: map[string]interface{}{"a": []interface{}{}}, + Ret: true, + }, + { + A: map[string]interface{}{"a": []interface{}{1}}, + B: map[string]interface{}{"a": 1}, + Ret: true, + }, + + // Maps and lists with multiple entries. + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 0}, + Ret: true, + }, + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 2}, + Ret: false, + }, + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 0, "c": 3}, + Ret: true, + }, + { + A: map[string]interface{}{"a": 1, "b": 2}, + B: map[string]interface{}{"a": 1, "b": 2, "c": 3}, + Ret: false, + }, + { + A: map[string]interface{}{"a": []interface{}{1, 2}}, + B: map[string]interface{}{"a": []interface{}{1, 0}}, + Ret: true, + }, + { + A: map[string]interface{}{"a": []interface{}{1, 2}}, + B: map[string]interface{}{"a": []interface{}{1, 2}}, + Ret: false, + }, + + // Numeric types are not interchangeable. + // Callers are expected to ensure numeric types are consistent in 'left' and 'right'. + {A: int(0), B: int64(0), Ret: true}, + {A: int(0), B: float64(0), Ret: true}, + {A: int64(0), B: float64(0), Ret: true}, + // Other types are not interchangeable. 
+ {A: int(0), B: "0", Ret: true}, + {A: int(0), B: nil, Ret: true}, + {A: int(0), B: false, Ret: true}, + {A: "true", B: true, Ret: true}, + {A: "null", B: nil, Ret: true}, + } + + for _, testCase := range testCases { + testStr := fmt.Sprintf("A = %#v, B = %#v", testCase.A, testCase.B) + // Run each test case multiple times if it passes because HasConflicts() + // uses map iteration, which returns keys in nondeterministic order. + for try := 0; try < 10; try++ { + out, err := HasConflicts(testCase.A, testCase.B) + if err != nil { + t.Errorf("%v: unexpected error: %v", testStr, err) + break + } + if out != testCase.Ret { + t.Errorf("%v: expected %t got %t", testStr, testCase.Ret, out) + break + } + out, err = HasConflicts(testCase.B, testCase.A) + if err != nil { + t.Errorf("%v: unexpected error: %v", testStr, err) + break + } + if out != testCase.Ret { + t.Errorf("%v: expected reversed %t got %t", testStr, testCase.Ret, out) + break + } + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD new file mode 100644 index 00000000..9d0fffb1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD @@ -0,0 +1,50 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "http_test.go", + "interface_test.go", + "port_range_test.go", + "port_split_test.go", + "util_test.go", + ], + embed = [":go_default_library"], + deps = ["//vendor/github.com/spf13/pflag:go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "http.go", + "interface.go", + "port_range.go", + "port_split.go", + "util.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/net", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 7ea2df22..bc2a531b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -19,7 +19,6 @@ package net import ( "bufio" "bytes" - "context" "crypto/tls" "fmt" "io" @@ -62,9 +61,6 @@ func JoinPreservingTrailingSlash(elem ...string) string { // differentiate probable errors in connection behavior between normal "this is // disconnected" should use the method. 
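To make the mergepatch semantics exercised by the test cases above concrete, a small illustrative sketch; it assumes only the vendored mergepatch package and is not part of the diff itself.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/mergepatch"
)

func main() {
	// Keys present on only one side never conflict; only differing values do.
	left := map[string]interface{}{"replicas": float64(2), "paused": true}
	right := map[string]interface{}{"replicas": float64(2)}
	conflict, _ := mergepatch.HasConflicts(left, right)
	fmt.Println(conflict) // false

	// The numeric caveat from the doc comment: int vs float64 is a conflict.
	conflict, _ = mergepatch.HasConflicts(
		map[string]interface{}{"replicas": 2},
		map[string]interface{}{"replicas": float64(2)},
	)
	fmt.Println(conflict) // true

	// A precondition that rejects patches touching metadata.labels.
	pre := mergepatch.RequireMetadataKeyUnchanged("labels")
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{"labels": nil},
	}
	fmt.Println(pre(patch)) // false: the patch changes labels
}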
func IsProbableEOF(err error) bool { - if err == nil { - return false - } if uerr, ok := err.(*url.Error); ok { err = uerr.Err } @@ -91,8 +87,8 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } - if t.DialContext == nil { - t.DialContext = defaultTransport.DialContext + if t.Dial == nil { + t.Dial = defaultTransport.Dial } if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout @@ -120,7 +116,7 @@ type RoundTripperWrapper interface { WrappedRoundTripper() http.RoundTripper } -type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error) +type DialFunc func(net, addr string) (net.Conn, error) func DialerFor(transport http.RoundTripper) (DialFunc, error) { if transport == nil { @@ -129,7 +125,7 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) { switch transport := transport.(type) { case *http.Transport: - return transport.DialContext, nil + return transport.Dial, nil case RoundTripperWrapper: return DialerFor(transport.WrappedRoundTripper()) default: diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go index 7b6eca89..6a50e618 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -43,19 +43,14 @@ func (pr PortRange) String() string { return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1) } -// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and +// Set parses a string of the form "min-max", inclusive at both ends, and // sets the PortRange from it. This is part of the flag.Value and pflag.Value // interfaces. 
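A quick illustrative sketch of the "min-max" parsing contract described in the Set doc comment above (assumes only the vendored util/net package; not part of the diff):

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	var pr utilnet.PortRange
	if err := pr.Set("8000-9000"); err != nil { // inclusive on both ends
		panic(err)
	}
	fmt.Println(pr.String())       // "8000-9000"
	fmt.Println(pr.Contains(9000)) // true

	// With this vendored revision only "min-max" parses; the TODO comments
	// in the rewritten Set body note "80" and "80+8" as possible future syntax.
	fmt.Println(pr.Set("80") != nil) // true
}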
func (pr *PortRange) Set(value string) error { - const ( - SinglePortNotation = 1 << iota - HyphenNotation - PlusNotation - ) - value = strings.TrimSpace(value) - hyphenIndex := strings.Index(value, "-") - plusIndex := strings.Index(value, "+") + + // TODO: Accept "80" syntax + // TODO: Accept "80+8" syntax if value == "" { pr.Base = 0 @@ -63,51 +58,20 @@ func (pr *PortRange) Set(value string) error { return nil } + hyphenIndex := strings.Index(value, "-") + if hyphenIndex == -1 { + return fmt.Errorf("expected hyphen in port range") + } + var err error - var low, high int - var notation int - - if plusIndex == -1 && hyphenIndex == -1 { - notation |= SinglePortNotation - } - if hyphenIndex != -1 { - notation |= HyphenNotation - } - if plusIndex != -1 { - notation |= PlusNotation - } - - switch notation { - case SinglePortNotation: - var port int - port, err = strconv.Atoi(value) - if err != nil { - return err - } - low = port - high = port - case HyphenNotation: - low, err = strconv.Atoi(value[:hyphenIndex]) - if err != nil { - return err - } + var low int + var high int + low, err = strconv.Atoi(value[:hyphenIndex]) + if err == nil { high, err = strconv.Atoi(value[hyphenIndex+1:]) - if err != nil { - return err - } - case PlusNotation: - var offset int - low, err = strconv.Atoi(value[:plusIndex]) - if err != nil { - return err - } - offset, err = strconv.Atoi(value[plusIndex+1:]) - if err != nil { - return err - } - high = low + offset - default: - return fmt.Errorf("unable to parse port range: %s", value) + } + if err != nil { + return fmt.Errorf("unable to parse port range: %s: %v", value, err) } if low > 65535 || high > 65535 { diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range_test.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range_test.go index b4cbe824..897b8df6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/port_range_test.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range_test.go @@ -34,21 +34,13 @@ func TestPortRange(t *testing.T) { {" 100-200 ", true, "100-200", 200, 201}, {"0-0", true, "0-0", 0, 1}, {"", true, "", -1, 0}, - {"100", true, "100-100", 100, 101}, + {"100", false, "", -1, -1}, {"100 - 200", false, "", -1, -1}, {"-100", false, "", -1, -1}, {"100-", false, "", -1, -1}, {"200-100", false, "", -1, -1}, {"60000-70000", false, "", -1, -1}, {"70000-80000", false, "", -1, -1}, - {"70000+80000", false, "", -1, -1}, - {"1+0", true, "1-1", 1, 2}, - {"0+0", true, "0-0", 0, 1}, - {"1+-1", false, "", -1, -1}, - {"1-+1", false, "", -1, -1}, - {"100+200", true, "100-300", 300, 301}, - {"1+65535", false, "", -1, -1}, - {"0+65535", true, "0-65535", 65535, 65536}, } for i := range testCases { @@ -60,7 +52,7 @@ func TestPortRange(t *testing.T) { t.Errorf("expected success, got %q", err) continue } else if err == nil && tc.success == false { - t.Errorf("expected failure %#v", testCases[i]) + t.Errorf("expected failure") continue } else if tc.success { if f.String() != tc.expected { diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD new file mode 100644 index 00000000..6407b8bd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD @@ -0,0 +1,33 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["runtime_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["runtime.go"], + importpath = 
"k8s.io/apimachinery/pkg/util/runtime", + deps = ["//vendor/github.com/golang/glog:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index da32fe12..d4cec0b8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -128,8 +128,9 @@ func (r *rudimentaryErrorBackoff) OnError(error) { r.lastErrorTimeLock.Lock() defer r.lastErrorTimeLock.Unlock() d := time.Since(r.lastErrorTime) - if d < r.minPeriod { + if d < r.minPeriod && d >= 0 { // If the time moves backwards for any reason, do nothing + // TODO: remove check "d >= 0" after go 1.8 is no longer supported time.Sleep(r.minPeriod - d) } r.lastErrorTime = time.Now() @@ -160,10 +161,3 @@ func RecoverFromPanic(err *error) { callers) } } - -// Must panics on non-nil errors. Useful to handling programmer level errors. -func Must(err error) { - if err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD new file mode 100644 index 00000000..ec2f2347 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD @@ -0,0 +1,71 @@ +package(default_visibility = ["//visibility:public"]) + +load("@io_kubernetes_build//defs:go.bzl", "go_genrule") +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "byte.go", + "doc.go", + "empty.go", + "int.go", + "int64.go", + "string.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/sets", +) + +go_genrule( + name = "set-gen", + srcs = [ + "//hack/boilerplate:boilerplate.go.txt", + ], + outs = [ + "byte.go", + "doc.go", + "empty.go", + "int.go", + "int64.go", + "string.go", + ], + cmd = """ +$(location //vendor/k8s.io/code-generator/cmd/set-gen) \ + --input-dirs ./vendor/k8s.io/apimachinery/pkg/util/sets/types \ + --output-base $$(dirname $$(dirname $(location :byte.go))) \ + --go-header-file $(location //hack/boilerplate:boilerplate.go.txt) \ + --output-package sets + """, + go_deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/sets/types:go_default_library", + ], + tools = [ + "//vendor/k8s.io/code-generator/cmd/set-gen", + ], +) + +go_test( + name = "go_default_test", + srcs = ["set_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/sets/types:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 766f4501..a460e4b1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. 
+// This file was autogenerated by set-gen. Do not edit it manually! package sets @@ -26,7 +26,7 @@ import ( // sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. type Byte map[byte]Empty -// NewByte creates a Byte from a list of values. +// New creates a Byte from a list of values. func NewByte(items ...byte) Byte { ss := Byte{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index b152a0bf..28a6a7d5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. +// This file was autogenerated by set-gen. Do not edit it manually! // Package sets has auto-generated set types. package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go index e11e622c..cd22b953 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. +// This file was autogenerated by set-gen. Do not edit it manually! package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index a0a513cd..0614e9fb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. +// This file was autogenerated by set-gen. Do not edit it manually! package sets @@ -26,7 +26,7 @@ import ( // sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. type Int map[int]Empty -// NewInt creates a Int from a list of values. +// New creates a Int from a list of values. func NewInt(items ...int) Int { ss := Int{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 9ca9af0c..82e1ba78 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. 
DO NOT EDIT. +// This file was autogenerated by set-gen. Do not edit it manually! package sets @@ -26,7 +26,7 @@ import ( // sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. type Int64 map[int64]Empty -// NewInt64 creates a Int64 from a list of values. +// New creates a Int64 from a list of values. func NewInt64(items ...int64) Int64 { ss := Int64{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index ba00ad7d..baef7a6a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. +// This file was autogenerated by set-gen. Do not edit it manually! package sets @@ -26,7 +26,7 @@ import ( // sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. type String map[string]Empty -// NewString creates a String from a list of values. +// New creates a String from a list of values. func NewString(items ...string) String { ss := String{} ss.Insert(items...) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD new file mode 100644 index 00000000..2d608892 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD @@ -0,0 +1,60 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["patch_test.go"], + data = [ + "testdata/swagger-merge-item.json", + "testdata/swagger-precision-item.json", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch/testing:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "meta.go", + "patch.go", + "types.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", + "//vendor/k8s.io/apimachinery/third_party/forked/golang/json:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/testing:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS 
b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 00000000..8e8d9fce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 00000000..ab66d045 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 00000000..c31de15e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. 
+ LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice gets subschema and the patch metadata for slice. + LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is a pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type.
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 00000000..2f6ade2b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,2151 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. +// +// For more information, see the PATCH section of docs/devel/api-conventions.md. 
+// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. + +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" + + retainKeysStrategy = "retainKeys" + + deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" + retainKeysDirective = "$" + retainKeysStrategy + setElementOrderDirectivePrefix = "$setElementOrder" +) + +// JSONMap is a representation of a JSON object encoded as map[string]interface{} +// where the children can be either map[string]interface{}, []interface{} or a +// primitive type. +// Operating on JSONMap representation is much faster as it doesn't require any +// json marshaling and/or unmarshaling operations. +type JSONMap map[string]interface{} + +type DiffOptions struct { + // SetElementOrder determines whether we generate the $setElementOrder parallel list. + SetElementOrder bool + // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. + IgnoreChangesAndAdditions bool + // IgnoreDeletions indicates if we keep the deletions in the patch. + IgnoreDeletions bool + // We introduce a new value retainKeys for patchStrategy. + // It indicates that all fields needing to be preserved must be + // present in the `retainKeys` list. + // And the fields that are present will be merged with live object. + // All the missing fields will be cleared when patching. + BuildRetainKeysDirective bool +} + +type MergeOptions struct { + // MergeParallelList indicates if we are merging the parallel list. + // We don't merge parallel lists when calling mergeMap() in CreateThreeWayMergePatch(), + // which is called client-side. + // We merge parallel lists only when calling mergeMap() in StrategicMergeMapPatch(), + // which is called server-side. + MergeParallelList bool + // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. + IgnoreUnmatchedNulls bool +} + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. +// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. +func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(patchMap)
+}
+
+// CreateTwoWayMergeMapPatch creates a patch from original and modified JSON objects,
+// each encoded as a JSONMap.
+// The serialized version of the map can then be passed to StrategicMergeMapPatch.
+func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...)
+}
+
+func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) {
+	diffOptions := DiffOptions{
+		SetElementOrder: true,
+	}
+	patchMap, err := diffMaps(original, modified, schema, diffOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Apply the preconditions to the patch, and return an error if any of them fail.
+	for _, fn := range fns {
+		if !fn(patchMap) {
+			return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+		}
+	}
+
+	return patchMap, nil
+}
+
+// diffMaps returns a (recursive) strategic merge patch that yields modified when applied to original.
+// Including:
+// - Adding fields to the patch that are present in modified but missing from original
+// - Setting fields in the patch that are present in both modified and original but with different values
+// - Deleting fields present in original but missing from modified:
+//   - IFF map field - set to nil in patch
+//   - IFF list of maps && merge strategy - use deleteDirective for the elements
+//   - IFF list of primitives && merge strategy - use parallel deletion list
+//   - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified
+// - Building the $retainKeys directive for fields with the retainKeys patch strategy
+func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) {
+	patch := map[string]interface{}{}
+
+	// This will be used to build the $retainKeys directive sent in the patch
+	retainKeysList := make([]interface{}, 0, len(modified))
+
+	// Compare each value in the modified map against the value in the original map
+	for key, modifiedValue := range modified {
+		// Collect non-null keys for the $retainKeys directive as we go
+		if diffOptions.BuildRetainKeysDirective && modifiedValue != nil {
+			retainKeysList = append(retainKeysList, key)
+		}
+
+		originalValue, ok := original[key]
+		if !ok {
+			// Key was added, so add to patch
+			if !diffOptions.IgnoreChangesAndAdditions {
+				patch[key] = modifiedValue
+			}
+			continue
+		}
+
+		// The patch may have a patch directive
+		// TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it?
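+		// (Illustrative: a directive marker is a "$patch" key inside the map
+		// itself, e.g. {"$patch": "delete"} or {"$patch": "replace"}; the
+		// directive values are this package's own constants, the surrounding
+		// map is hypothetical.)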
+		foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch)
+		if err != nil {
+			return nil, err
+		}
+		if foundDirectiveMarker {
+			continue
+		}
+
+		if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) {
+			// Types have changed, so add to patch
+			if !diffOptions.IgnoreChangesAndAdditions {
+				patch[key] = modifiedValue
+			}
+			continue
+		}
+
+		// Types are the same, so compare values
+		switch originalValueTyped := originalValue.(type) {
+		case map[string]interface{}:
+			modifiedValueTyped := modifiedValue.(map[string]interface{})
+			err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions)
+		case []interface{}:
+			modifiedValueTyped := modifiedValue.([]interface{})
+			err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions)
+		default:
+			replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	updatePatchIfMissing(original, modified, patch, diffOptions)
+	// Insert the retainKeysList iff there are values present in the retainKeysList and
+	// either of the following is true:
+	// - the patch is not empty
+	// - there are additional fields in original that need to be cleared
+	if len(retainKeysList) > 0 &&
+		(len(patch) > 0 || hasAdditionalNewField(original, modified)) {
+		patch[retainKeysDirective] = sortScalars(retainKeysList)
+	}
+	return patch, nil
+}
+
+// handleDirectiveMarker handles how to diff the directive marker between 2 objects
+func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) {
+	if key == directiveMarker {
+		originalString, ok := originalValue.(string)
+		if !ok {
+			return false, fmt.Errorf("invalid value for special key: %s", directiveMarker)
+		}
+		modifiedString, ok := modifiedValue.(string)
+		if !ok {
+			return false, fmt.Errorf("invalid value for special key: %s", directiveMarker)
+		}
+		if modifiedString != originalString {
+			patch[directiveMarker] = modifiedValue
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+// handleMapDiff diffs 2 maps, `originalValue` and `modifiedValue`,
+// and puts the diff in the `patch` associated with `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue and modifiedValue are the old and new values respectively. They are both maps.
+// patch is the patch map that contains key and the updated value; it is the parent of originalValue and modifiedValue.
+// diffOptions contains multiple options to control how we do the diff.
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{},
+	schema LookupPatchMeta, diffOptions DiffOptions) error {
+	subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key)
+
+	if err != nil {
+		// We couldn't look up metadata for the field
+		// If the values are identical, this doesn't matter, no patch is needed
+		if reflect.DeepEqual(originalValue, modifiedValue) {
+			return nil
+		}
+		// Otherwise, return the error
+		return err
+	}
+	retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+	if err != nil {
+		return err
+	}
+	diffOptions.BuildRetainKeysDirective = retainKeys
+	switch patchStrategy {
+	// The patch strategy from metadata tells us to replace the entire object instead of diffing it
+	case replaceDirective:
+		if !diffOptions.IgnoreChangesAndAdditions {
+			patch[key] = modifiedValue
+		}
+	default:
+		patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions)
+		if err != nil {
+			return err
+		}
+		// Maps were not identical, use provided patch value
+		if len(patchValue) > 0 {
+			patch[key] = patchValue
+		}
+	}
+	return nil
+}
+
+// handleSliceDiff diffs 2 slices, `originalValue` and `modifiedValue`,
+// and puts the diff in the `patch` associated with `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue and modifiedValue are the old and new values respectively. They are both slices.
+// patch is the patch map that contains key and the updated value; it is the parent of originalValue and modifiedValue.
+// diffOptions contains multiple options to control how we do the diff.
+func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{},
+	schema LookupPatchMeta, diffOptions DiffOptions) error {
+	subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key)
+	if err != nil {
+		// We couldn't look up metadata for the field
+		// If the values are identical, this doesn't matter, no patch is needed
+		if reflect.DeepEqual(originalValue, modifiedValue) {
+			return nil
+		}
+		// Otherwise, return the error
+		return err
+	}
+	retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+	if err != nil {
+		return err
+	}
+	switch patchStrategy {
+	// Merge the 2 slices using mergePatchKey
+	case mergeDirective:
+		diffOptions.BuildRetainKeysDirective = retainKeys
+		addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions)
+		if err != nil {
+			return err
+		}
+		if len(addList) > 0 {
+			patch[key] = addList
+		}
+		// generate a parallel list for deletion
+		if len(deletionList) > 0 {
+			parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key)
+			patch[parallelDeletionListKey] = deletionList
+		}
+		if len(setOrderList) > 0 {
+			parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key)
+			patch[parallelSetOrderListKey] = setOrderList
+		}
+	default:
+		replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+	}
+	return nil
+}
+
+// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal
+// and diffOptions.IgnoreChangesAndAdditions is false.
+// original is the old value; it may be either the live cluster object or the last applied configuration.
+// modified is the new value; it is always the user's new config.
+func replacePatchFieldIfNotEqual(key string, original, modified interface{},
+	patch map[string]interface{}, diffOptions DiffOptions) {
+	if diffOptions.IgnoreChangesAndAdditions {
+		// Ignoring changes - do nothing
+		return
+	}
+	if reflect.DeepEqual(original, modified) {
+		// Contents are identical - do nothing
+		return
+	}
+	// Create a patch to replace the old value with the new one
+	patch[key] = modified
+}
+
+// updatePatchIfMissing iterates over `original` when ignoreDeletions is false.
+// It clears any field whose key is not present in `modified`.
+// original is the old value; it may be either the live cluster object or the last applied configuration.
+// modified is the new value; it is always the user's new config.
+func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) {
+	if diffOptions.IgnoreDeletions {
+		// Ignoring deletions - do nothing
+		return
+	}
+	// Add nils for deleted values
+	for key := range original {
+		if _, found := modified[key]; !found {
+			patch[key] = nil
+		}
+	}
+}
+
+// validateMergeKeyInLists checks if each map in the lists has the merge key.
+func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error {
+	for _, list := range lists {
+		for _, item := range list {
+			m, ok := item.(map[string]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(m, item)
+			}
+			if _, ok = m[mergeKey]; !ok {
+				return mergepatch.ErrNoMergeKey(m, mergeKey)
+			}
+		}
+	}
+	return nil
+}
+
+// normalizeElementOrder sorts the `patch` list by `patchOrder` and the `serverOnly` list by `serverOrder`.
+// Then it merges the 2 sorted lists.
+// It guarantees that the relative order within the patch list and within the serverOnly list is kept.
+// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object.
+// `patchOrder` is the order we want the `patch` list to have and
+// `serverOrder` is the order we want the `serverOnly` list to have.
+// kind is the kind of each item in the lists `patch` and `serverOnly`.
+func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) {
+	patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind)
+	if err != nil {
+		return nil, err
+	}
+	serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind)
+	if err != nil {
+		return nil, err
+	}
+	all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind)
+
+	return all, nil
+}
+
+// mergeSortedSlice merges the 2 lists, sorted by serverOrder, with best effort.
+// It will insert each item in the `left` list into the `right` list. In most cases, the 2 lists will be interleaved.
+// The relative order of left and right is guaranteed to be kept.
+// Their orders have higher precedence than the order in the live list.
+// The place for an item in `left` is found by:
+// scanning from the place of the last insertion in `right` to the end of `right`;
+// the place is before the first item that is greater than the item we want to insert.
+// Example usage: use the server-only items as left and the patch items as right. We insert server-only items
+// into the patch list. We use the order of the live object as the record for comparison.
+func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} {
+	// Returns whether l is less than r, and whether both have been found.
+ // If l and r both present and l is in front of r, l is less than r. + less := func(l, r interface{}) (bool, bool) { + li := index(serverOrder, l, mergeKey, kind) + ri := index(serverOrder, r, mergeKey, kind) + if li >= 0 && ri >= 0 { + return li < ri, true + } else { + return false, false + } + } + + // left and right should be non-overlapping. + size := len(left) + len(right) + i, j := 0, 0 + s := make([]interface{}, size, size) + + for k := 0; k < size; k++ { + if i >= len(left) && j < len(right) { + // have items left in `right` list + s[k] = right[j] + j++ + } else if j >= len(right) && i < len(left) { + // have items left in `left` list + s[k] = left[i] + i++ + } else { + // compare them if i and j are both in bound + less, foundBoth := less(left[i], right[j]) + if foundBoth && less { + s[k] = left[i] + i++ + } else { + s[k] = right[j] + j++ + } + } + } + return s +} + +// index returns the index of the item in the given items, or -1 if it doesn't exist +// l must NOT be a slice of slices, this should be checked before calling. +func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { + var getValFn func(interface{}) interface{} + // Get the correct `getValFn` based on item `kind`. + // It should return the value of merge key for maps and + // return the item for other kinds. + switch kind { + case reflect.Map: + getValFn = func(item interface{}) interface{} { + typedItem, ok := item.(map[string]interface{}) + if !ok { + return nil + } + val := typedItem[mergeKey] + return val + } + default: + getValFn = func(item interface{}) interface{} { + return item + } + } + + for i, v := range l { + if getValFn(valToLookUp) == getValFn(v) { + return i + } + } + return -1 +} + +// extractToDeleteItems takes a list and +// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. +func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { + var nonDelete, toDelete []interface{} + for _, v := range l { + m, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, v) + } + + directive, foundDirective := m[directiveMarker] + if foundDirective && directive == deleteDirective { + toDelete = append(toDelete, v) + } else { + nonDelete = append(nonDelete, v) + } + } + return nonDelete, toDelete, nil +} + +// normalizeSliceOrder sort `toSort` list by `order` +func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + var toDelete []interface{} + if kind == reflect.Map { + // make sure each item in toSort, order has merge key + err := validateMergeKeyInLists(mergeKey, toSort, order) + if err != nil { + return nil, err + } + toSort, toDelete, err = extractToDeleteItems(toSort) + if err != nil { + return nil, err + } + } + + sort.SliceStable(toSort, func(i, j int) bool { + if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { + if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { + return ii < ij + } + } + return true + }) + toSort = append(toSort, toDelete...) + return toSort, nil +} + +// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and +// another list to set the order of the list +// Only list of primitives with merge strategy will generate a parallel deletion list. +// These two lists should yield modified when applied to original, for lists with merge semantics. 
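+// (Illustrative, hypothetical values: for a merge-strategy list of scalars,
+// diffing original ["a", "b"] against modified ["b", "c"] yields an add list
+// ["c"], a deletion list ["a"], and a set-order list ["b", "c"].)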
+func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) {
+	if len(original) == 0 {
+		// Both slices are empty - do nothing
+		if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions {
+			return nil, nil, nil, nil
+		}
+
+		// Old slice was empty - add all elements from the new slice
+		return modified, nil, nil, nil
+	}
+
+	elementType, err := sliceElementType(original, modified)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	var patchList, deleteList, setOrderList []interface{}
+	kind := elementType.Kind()
+	switch kind {
+	case reflect.Map:
+		patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		orderSame, err := isOrderSame(original, modified, mergeKey)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		// append the deletions to the end of the patch list.
+		patchList = append(patchList, deleteList...)
+		deleteList = nil
+		// generate the setElementOrder list when there are content changes or order changes
+		if diffOptions.SetElementOrder &&
+			((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) ||
+				(!diffOptions.IgnoreDeletions && len(patchList) > 0)) {
+			// Generate a list of maps where each item contains only the merge key.
+			setOrderList = make([]interface{}, len(modified))
+			for i, v := range modified {
+				typedV := v.(map[string]interface{})
+				setOrderList[i] = map[string]interface{}{
+					mergeKey: typedV[mergeKey],
+				}
+			}
+		}
+	case reflect.Slice:
+		// Lists of lists are not permitted by the API
+		return nil, nil, nil, mergepatch.ErrNoListOfLists
+	default:
+		patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		// generate the setElementOrder list when there are content changes or order changes
+		if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) ||
+			(!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) {
+			setOrderList = modified
+		}
+	}
+	return patchList, deleteList, setOrderList, err
+}
+
+// isOrderSame checks if the order in a list has changed
+func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) {
+	if len(original) != len(modified) {
+		return false, nil
+	}
+	for i, modifiedItem := range modified {
+		equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey)
+		if err != nil || !equal {
+			return equal, err
+		}
+	}
+	return true, nil
+}
+
+// diffListsOfScalars returns 2 lists: the first one is the addList and the second one is the deletionList.
+// diffOptions.IgnoreChangesAndAdditions controls whether the addList is calculated; true means it is not.
+// diffOptions.IgnoreDeletions controls whether the deletionList is calculated; true means it is not.
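+// (Illustrative, hypothetical values: original [1, 2, 3] vs modified [2, 3, 4]
+// produces addList [4] and deletionList [1].)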
+// original may be changed, but modified is guaranteed to not be changed +func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + modifiedCopy := make([]interface{}, len(modified)) + copy(modifiedCopy, modified) + // Sort the scalars for easier calculating the diff + originalScalars := sortScalars(original) + modifiedScalars := sortScalars(modifiedCopy) + + originalIndex, modifiedIndex := 0, 0 + addList := []interface{}{} + deletionList := []interface{}{} + + for { + originalInBounds := originalIndex < len(originalScalars) + modifiedInBounds := modifiedIndex < len(modifiedScalars) + if !originalInBounds && !modifiedInBounds { + break + } + // we need to compare the string representation of the scalar, + // because the scalar is an interface which doesn't support either < or > + // And that's how func sortScalars compare scalars. + var originalString, modifiedString string + var originalValue, modifiedValue interface{} + if originalInBounds { + originalValue = originalScalars[originalIndex] + originalString = fmt.Sprintf("%v", originalValue) + } + if modifiedInBounds { + modifiedValue = modifiedScalars[modifiedIndex] + modifiedString = fmt.Sprintf("%v", modifiedValue) + } + + originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString) + switch { + case originalV == nil && modifiedV == nil: + originalIndex++ + modifiedIndex++ + case originalV != nil && modifiedV == nil: + if !diffOptions.IgnoreDeletions { + deletionList = append(deletionList, originalValue) + } + originalIndex++ + case originalV == nil && modifiedV != nil: + if !diffOptions.IgnoreChangesAndAdditions { + addList = append(addList, modifiedValue) + } + modifiedIndex++ + default: + return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV) + } + } + + return addList, deduplicateScalars(deletionList), nil +} + +// If first return value is non-nil, list1 contains an element not present in list2 +// If second return value is non-nil, list2 contains an element not present in list1 +func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) { + bothInBounds := list1Inbounds && list2Inbounds + switch { + // scalars are identical + case bothInBounds && list1Value == list2Value: + return nil, nil + // only list2 is in bound + case !list1Inbounds: + fallthrough + // list2 has additional scalar + case bothInBounds && list1Value > list2Value: + return nil, list2Value + // only original is in bound + case !list2Inbounds: + fallthrough + // original has additional scalar + case bothInBounds && list1Value < list2Value: + return list1Value, nil + default: + return nil, nil + } +} + +// diffListsOfMaps takes a pair of lists and +// returns a (recursive) strategic merge patch list contains additions and changes and +// a deletion list contains deletions +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(modified)) + deletionList := make([]interface{}, 0, len(original)) + + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + + originalIndex, 
modifiedIndex := 0, 0 + for { + originalInBounds := originalIndex < len(originalSorted) + modifiedInBounds := modifiedIndex < len(modifiedSorted) + bothInBounds := originalInBounds && modifiedInBounds + if !originalInBounds && !modifiedInBounds { + break + } + + var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string + var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{} + var originalElement, modifiedElement map[string]interface{} + if originalInBounds { + originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted) + if err != nil { + return nil, nil, err + } + originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue) + } + if modifiedInBounds { + modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted) + if err != nil { + return nil, nil, err + } + modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue) + } + + switch { + case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + // Merge key values are equal, so recurse + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) + if err != nil { + return nil, nil, err + } + if len(patchValue) > 0 { + patchValue[mergeKey] = modifiedElementMergeKeyValue + patch = append(patch, patchValue) + } + originalIndex++ + modifiedIndex++ + // only modified is in bound + case !originalInBounds: + fallthrough + // modified has additional map + case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreChangesAndAdditions { + patch = append(patch, modifiedElement) + } + modifiedIndex++ + // only original is in bound + case !modifiedInBounds: + fallthrough + // original has additional map + case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreDeletions { + // Item was deleted, so add delete directive + deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue)) + } + originalIndex++ + } + } + + return patch, deletionList, nil +} + +// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the index of the map. +func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) { + m, ok := listOfMaps[index].(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index]) + } + + val, ok := m[mergeKey] + if !ok { + return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey) + } + return m, val, nil +} + +// StrategicMergePatch applies a strategic merge patch. The patch and the original document +// must be json encoded content. A patch can be created from an original and a modified document +// by calling CreateStrategicMergePatch. 
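+//
+// Example (illustrative; the struct and field names are hypothetical, but the
+// struct tags are the ones this package reads):
+//
+//	type Spec struct {
+//		Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
+//	}
+//	merged, err := StrategicMergePatch(originalJSON, patchJSON, &Spec{})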
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { + originalMap, err := handleUnmarshal(original) + if err != nil { + return nil, err + } + patchMap, err := handleUnmarshal(patch) + if err != nil { + return nil, err + } + + result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) + if err != nil { + return nil, err + } + + return json.Marshal(result) +} + +func handleUnmarshal(j []byte) (map[string]interface{}, error) { + if j == nil { + j = []byte("{}") + } + + m := map[string]interface{}{} + err := json.Unmarshal(j, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + return m, nil +} + +// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents +// must be JSONMap. A patch can be created from an original and modified document by +// calling CreateTwoWayMergeMapPatch. +// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. +func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. + + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat + } + + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: true, + IgnoreUnmatchedNulls: true, + } + return mergeMap(original, patch, schema, mergeOptions) +} + +// handleDirectiveInMergeMap handles the patch directive when merging 2 maps. +func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) { + if directive == replaceDirective { + // If the patch contains "$patch: replace", don't merge it, just use the + // patch directly. Later on, we can add a single level replace that only + // affects the map that the $patch is in. + delete(patch, directiveMarker) + return patch, nil + } + + if directive == deleteDirective { + // If the patch contains "$patch: delete", don't merge it, just return + // an empty map. 
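+		// (Illustrative: merging {"$patch": "delete"} into any original map
+		// yields {}; the example original is hypothetical.)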
+		return map[string]interface{}{}, nil
+	}
+
+	return nil, mergepatch.ErrBadPatchType(directive, patch)
+}
+
+func containsDirectiveMarker(item interface{}) bool {
+	m, ok := item.(map[string]interface{})
+	if ok {
+		if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker {
+			return true
+		}
+	}
+	return false
+}
+
+func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) {
+	if len(mergeKey) == 0 {
+		return left == right, nil
+	}
+	typedLeft, ok := left.(map[string]interface{})
+	if !ok {
+		return false, mergepatch.ErrBadArgType(typedLeft, left)
+	}
+	typedRight, ok := right.(map[string]interface{})
+	if !ok {
+		return false, mergepatch.ErrBadArgType(typedRight, right)
+	}
+	mergeKeyLeft, ok := typedLeft[mergeKey]
+	if !ok {
+		return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey)
+	}
+	mergeKeyRight, ok := typedRight[mergeKey]
+	if !ok {
+		return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey)
+	}
+	return mergeKeyLeft == mergeKeyRight, nil
+}
+
+// extractKey trims the prefix and returns the original key
+func extractKey(s, prefix string) (string, error) {
+	substrings := strings.SplitN(s, "/", 2)
+	if len(substrings) <= 1 || substrings[0] != prefix {
+		switch prefix {
+		case deleteFromPrimitiveListDirectivePrefix:
+			return "", mergepatch.ErrBadPatchFormatForPrimitiveList
+		case setElementOrderDirectivePrefix:
+			return "", mergepatch.ErrBadPatchFormatForSetElementOrderList
+		default:
+			return "", fmt.Errorf("failed to find prefix %q in %s", prefix, s)
+		}
+	}
+	return substrings[1], nil
+}
+
+// validatePatchWithSetOrderList verifies that:
+// the relative order of any two items in the setOrderList matches that in the patch list;
+// the items in the patch list must be a subset of, or the same as, the $setElementOrder list (deletions are ignored).
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error {
+	typedSetOrderList, ok := setOrderList.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForSetElementOrderList
+	}
+	typedPatchList, ok := patchList.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForSetElementOrderList
+	}
+	if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 {
+		return nil
+	}
+
+	var nonDeleteList, toDeleteList []interface{}
+	var err error
+	if len(mergeKey) > 0 {
+		nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList)
+		if err != nil {
+			return err
+		}
+	} else {
+		nonDeleteList = typedPatchList
+	}
+
+	patchIndex, setOrderIndex := 0, 0
+	for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) {
+		if containsDirectiveMarker(nonDeleteList[patchIndex]) {
+			patchIndex++
+			continue
+		}
+		mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey)
+		if err != nil {
+			return err
+		}
+		if mergeKeyEqual {
+			patchIndex++
+		}
+		setOrderIndex++
+	}
+	// If patchIndex is still in bounds but setOrderIndex is out of bounds, some items in the patch list
+	// are missing from the setElementOrder list.
+	// The second check is a sanity check, and should always be true if the first is true.
+	if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
+		return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
+	}
+	typedPatchList = append(nonDeleteList, toDeleteList...)
+	return nil
+}
+
+// preprocessDeletionListForMerging preprocesses the deletion list.
+// it returns shouldContinue, isDeletionList, noPrefixKey +func preprocessDeletionListForMerging(key string, original map[string]interface{}, + patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) { + // If found a parallel list for deletion and we are going to merge the list, + // overwrite the key to the original key and set flag isDeleteList + foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix) + if foundParallelListPrefix { + if !mergeDeletionList { + original[key] = patchVal + return true, false, "", nil + } + originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix) + return false, true, originalKey, err + } + return false, false, "", nil +} + +// applyRetainKeysDirective looks for a retainKeys directive and applies to original +// - if no directive exists do nothing +// - if directive is found, clear keys in original missing from the directive list +// - validate that all keys present in the patch are present in the retainKeys directive +// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives +func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error { + retainKeysInPatch, foundInPatch := patch[retainKeysDirective] + if !foundInPatch { + return nil + } + // cleanup the directive + delete(patch, retainKeysDirective) + + if !options.MergeParallelList { + // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both. + // If not present in the original patch, copy from the modified patch. + retainKeysInOriginal, foundInOriginal := original[retainKeysDirective] + if foundInOriginal { + if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) { + // This error actually should never happen. + return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) + } + } else { + original[retainKeysDirective] = retainKeysInPatch + } + return nil + } + + retainKeysList, ok := retainKeysInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + + // validate patch to make sure all fields in the patch are present in the retainKeysList. + // The map is used only as a set, the value is never referenced + m := map[interface{}]struct{}{} + for _, v := range retainKeysList { + m[v] = struct{}{} + } + for k, v := range patch { + if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || + strings.HasPrefix(k, setElementOrderDirectivePrefix) { + continue + } + // If there is an item present in the patch but not in the retainKeys list, + // the patch is invalid. + if _, found := m[k]; !found { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + } + + // clear not present fields + for k := range original { + if _, found := m[k]; !found { + delete(original, k) + } + } + return nil +} + +// mergePatchIntoOriginal processes $setElementOrder list. +// When not merging the directive, it will make sure $setElementOrder list exist only in original. +// When merging the directive, it will try to find the $setElementOrder list and +// its corresponding patch list, validate it and merge it. +// Then, sort them by the relative order in setElementOrder, patch list and live list. +// The precedence is $setElementOrder > order in patch list > order in live list. 
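+// (Illustrative, hypothetical values: a patch of
+//	{"$setElementOrder/containers": [{"name": "b"}, {"name": "a"}],
+//	 "containers": [{"name": "a", "image": "a:v2"}]}
+// merges the image change for "a" and orders the merged list as b, a.)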
+// This function deletes each item after merging it, to prevent it from being processed again later.
+// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md
+func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error {
+	for key, patchV := range patch {
+		// Do nothing if there is no ordering directive
+		if !strings.HasPrefix(key, setElementOrderDirectivePrefix) {
+			continue
+		}
+
+		setElementOrderInPatch := patchV
+		// Copy the directive from the second patch (`patch`) to the first patch (`original`),
+		// check that they are equal, and delete the directive from the second patch.
+		if !mergeOptions.MergeParallelList {
+			setElementOrderListInOriginal, ok := original[key]
+			if ok {
+				// check if the setElementOrder list in original and the one in patch match
+				if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) {
+					return mergepatch.ErrBadPatchFormatForSetElementOrderList
+				}
+			} else {
+				// move the setElementOrder list from patch to original
+				original[key] = setElementOrderInPatch
+			}
+		}
+		delete(patch, key)
+
+		var (
+			ok                                          bool
+			originalFieldValue, patchFieldValue, merged []interface{}
+			patchStrategy                               string
+			patchMeta                                   PatchMeta
+			subschema                                   LookupPatchMeta
+		)
+		typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{})
+		if !ok {
+			return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch)
+		}
+		// Trim the setElementOrderDirectivePrefix to get the key of the list field in original.
+		originalKey, err := extractKey(key, setElementOrderDirectivePrefix)
+		if err != nil {
+			return err
+		}
+		// try to find the list with `originalKey` in `original` and `patch` and merge them.
+		originalList, foundOriginal := original[originalKey]
+		patchList, foundPatch := patch[originalKey]
+		if foundOriginal {
+			originalFieldValue, ok = originalList.([]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(originalFieldValue, originalList)
+			}
+		}
+		if foundPatch {
+			patchFieldValue, ok = patchList.([]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(patchFieldValue, patchList)
+			}
+		}
+		subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey)
+		if err != nil {
+			return err
+		}
+		_, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+		if err != nil {
+			return err
+		}
+		// Check for consistency between the element order list and the field it applies to
+		err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
+		if err != nil {
+			return err
+		}
+
+		switch {
+		case foundOriginal && !foundPatch:
+			// no change to list contents
+			merged = originalFieldValue
+		case !foundOriginal && foundPatch:
+			// list was added
+			merged = patchFieldValue
+		case foundOriginal && foundPatch:
+			merged, err = mergeSliceHandler(originalList, patchList, subschema,
+				patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions)
+			if err != nil {
+				return err
+			}
+		case !foundOriginal && !foundPatch:
+			continue
+		}
+
+		// Split all items into patch items and server-only items and then enforce the order.
+		var patchItems, serverOnlyItems []interface{}
+		if len(patchMeta.GetPatchMergeKey()) == 0 {
+			// Primitives don't need a merge key for partitioning.
+			patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList)
+
+		} else {
+			// Maps need a merge key for partitioning.
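+			// (Illustrative, hypothetical values: partitioning
+			// [{"name": "a"}, {"name": "x"}] by [{"name": "a"}] with merge key
+			// "name" yields patch items [{"name": "a"}] and server-only items
+			// [{"name": "x"}].)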
+ patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) + if err != nil { + return err + } + } + + elementType, err := sliceElementType(originalFieldValue, patchFieldValue) + if err != nil { + return err + } + kind := elementType.Kind() + // normalize merged list + // typedSetElementOrderList contains all the relative order in typedPatchList, + // so don't need to use typedPatchList + both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) + if err != nil { + return err + } + original[originalKey] = both + // delete patch list from patch to prevent process again in the future + delete(patch, originalKey) + } + return nil +} + +// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. +func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + inPatch := map[interface{}]bool{} + for _, v := range partitionBy { + inPatch[v] = true + } + for _, v := range original { + if !inPatch[v] { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly +} + +// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. +func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + for _, v := range original { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedV, v) + } + mergeKeyValue, foundMergeKey := typedV[mergeKey] + if !foundMergeKey { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue) + if err != nil { + return nil, nil, err + } + if !found { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly, nil +} + +// Merge fields from a patch map into the original map. Note: This may modify +// both the original map and the patch because getting a deep copy of a map in +// golang is highly non-trivial. +// flag mergeOptions.MergeParallelList controls if using the parallel list to delete or keeping the list. +// If patch contains any null field (e.g. field_1: null) that is not +// present in original, then to propagate it to the end result use +// mergeOptions.IgnoreUnmatchedNulls == false. +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { + if v, ok := patch[directiveMarker]; ok { + return handleDirectiveInMergeMap(v, patch) + } + + // nil is an accepted value for original to simplify logic in other places. + // If original is nil, replace it with an empty map and then apply the patch. + if original == nil { + original = map[string]interface{}{} + } + + err := applyRetainKeysDirective(original, patch, mergeOptions) + if err != nil { + return nil, err + } + + // Process $setElementOrder list and other lists sharing the same key. + // When not merging the directive, it will make sure $setElementOrder list exist only in original. 
+ // When merging the directive, it will process $setElementOrder and its patch list together. + // This function will delete the merged elements from patch so they will not be reprocessed + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Start merging the patch into the original. + for k, patchV := range patch { + skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList) + if err != nil { + return nil, err + } + if skipProcessing { + continue + } + if len(noPrefixKey) > 0 { + k = noPrefixKey + } + + // If the value of this key is null, delete the key if it exists in the + // original. Otherwise, check if we want to preserve it or skip it. + // Preserving the null value is useful when we want to send an explicit + // delete to the API server. + if patchV == nil { + if _, ok := original[k]; ok { + delete(original, k) + } + if mergeOptions.IgnoreUnmatchedNulls { + continue + } + } + + _, ok := original[k] + if !ok { + // If it's not in the original document, just take the patch value. + original[k] = patchV + continue + } + + originalType := reflect.TypeOf(original[k]) + patchType := reflect.TypeOf(patchV) + if originalType != patchType { + original[k] = patchV + continue + } + // If they're both maps or lists, recurse into the value. + switch originalType.Kind() { + case reflect.Map: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) + case reflect.Slice: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) + default: + original[k] = patchV + } + if err != nil { + return nil, err + } + } + return original, nil +} + +// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy and mergeOptions. +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { + typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy != replaceDirective { + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) + } else { + return typedPatch, nil + } +} + +// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. 
+func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { + typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) + } else { + return typedPatch, nil + } +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + var merged []interface{} + kind := t.Kind() + // If the elements are not maps, merge the slices of scalars. + if kind != reflect.Map { + if mergeOptions.MergeParallelList && isDeleteList { + return deleteFromSlice(original, patch), nil + } + // Maybe in the future add a "concat" mode that doesn't + // deduplicate. + both := append(original, patch...) + merged = deduplicateScalars(both) + + } else { + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) + } + + original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) + if err != nil { + return nil, err + } + + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) + if err != nil { + return nil, err + } + } + + // enforce the order + var patchItems, serverOnlyItems []interface{} + if len(mergeKey) == 0 { + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) + } else { + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) + if err != nil { + return nil, err + } + } + return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) +} + +// mergeSliceWithSpecialElements handles special elements with directiveMarker +// before merging the slices. It returns a updated `original` and a patch without special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if !ok { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } else { + switch patchType { + case deleteDirective: + mergeValue, ok := typedV[mergeKey] + if ok { + var err error + original, err = deleteMatchingEntries(original, mergeKey, mergeValue) + if err != nil { + return nil, nil, err + } + } else { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + case replaceDirective: + replace = true + // Continue iterating through the array to prune any other $patch elements. 
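+				// (Illustrative: a patch list [{"$patch": "replace"}, {"name": "a"}]
+				// discards the original list and keeps only the non-directive
+				// elements, here [{"name": "a"}]; values are hypothetical.)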
+ case mergeDirective: + return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + default: + return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) + } + } + } + if replace { + return patchWithoutSpecialElements, nil, nil + } + return original, patchWithoutSpecialElements, nil +} + +// delete all matching entries (based on merge key) from a merging list +func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { + for { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if !found { + break + } + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + return original, nil +} + +// mergeSliceWithoutSpecialElements merges slices with non-special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { + for _, v := range patch { + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. + originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + toDeleteMap := map[interface{}]interface{}{} + processed := make([]interface{}, 0, len(current)) + for _, v := range toDelete { + toDeleteMap[v] = true + } + for _, v := range current { + if _, found := toDeleteMap[v]; !found { + processed = append(processed, v) + } + } + return processed +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. 
+func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + newM, err := sortMergeListsByNameMap(m, schema) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k == retainKeysDirective { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForRetainKeys + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { + _, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else if k != directiveMarker { + // recurse for map and slice. + switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + if patchStrategy == mergeDirective { + var err error + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) + if err != nil { + return nil, err + } + } + } + } + + newS[k] = v + } + + return newS, nil +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { + if len(s) == 0 { + return s, nil + } + + // We don't support lists of lists yet. + t, err := sliceElementType(s) + if err != nil { + return nil, err + } + + // If the elements are not maps... + if t.Kind() != reflect.Map { + // Sort the elements, because they may have been merged out of order. + return deduplicateAndSortScalars(s), nil + } + + // Elements are maps - if one of the keys of the map is a map or a + // list, we may need to recurse into it. + newS := []interface{}{} + for _, elem := range s { + if recurse { + typedElem := elem.(map[string]interface{}) + newElem, err := sortMergeListsByNameMap(typedElem, schema) + if err != nil { + return nil, err + } + + newS = append(newS, newElem) + } else { + newS = append(newS, elem) + } + } + + // Sort the maps. 
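+	// (Illustrative: with mergeKey "name", [{"name": "b"}, {"name": "a"}]
+	// sorts to [{"name": "a"}, {"name": "b"}]; values are hypothetical.)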
+ newS = sortMapsBasedOnField(newS, mergeKey) + return newS, nil +} + +func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { + mapM := mapSliceFromSlice(m) + ss := SortableSliceOfMaps{mapM, fieldName} + sort.Sort(ss) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) + } + + return newM +} + +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + +type SortableSliceOfMaps struct { + s []map[string]interface{} + k string // key to sort on +} + +func (ss SortableSliceOfMaps) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfMaps) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) + jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfMaps) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +func deduplicateAndSortScalars(s []interface{}) []interface{} { + s = deduplicateScalars(s) + return sortScalars(s) +} + +func sortScalars(s []interface{}) []interface{} { + ss := SortableSliceOfScalars{s} + sort.Sort(ss) + return ss.s +} + +func deduplicateScalars(s []interface{}) []interface{} { + // Clever algorithm to deduplicate. + length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. 
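+//
+// (Illustrative, hypothetical values: {"image": "a:v1"} and {"image": "a:v2"}
+// conflict on "image", while {"image": "a:v1"} and {"replicas": 2} do not
+// overlap and therefore do not conflict.)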
+func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + rightType, ok := right.(map[string]interface{}) + if !ok { + return true, nil + } + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + if fieldPatchStrategy == replaceDirective { + return false, nil + } + // Check the individual keys. + return mapsHaveConflicts(leftType, rightType, schema) + + case []interface{}: + rightType, ok := right.([]interface{}) + if !ok { + return true, nil + } + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker && key != retainKeysDirective { + if rightValue, ok := typedRight[key]; ok { + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != 
nil {
+			return true, err
+		}
+
+		rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey)
+		if err != nil {
+			return true, err
+		}
+
+		return mapsOfMapsHaveConflicts(leftMap, rightMap, schema)
+	}
+
+	// Either we don't have type information, or these are non-merging lists
+	if len(typedLeft) != len(typedRight) {
+		return true, nil
+	}
+
+	// Sort scalar slices to prevent ordering issues
+	// We have no way to sort non-merging lists of maps
+	if elementType.Kind() != reflect.Map {
+		typedLeft = deduplicateAndSortScalars(typedLeft)
+		typedRight = deduplicateAndSortScalars(typedRight)
+	}
+
+	// Compare the slices element by element in order
+	// This test will fail if the slices are not sorted
+	for i := range typedLeft {
+		if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts {
+			return true, err
+		}
+	}
+
+	return false, nil
+}
+
+func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) {
+	result := make(map[string]interface{}, len(slice))
+	for _, value := range slice {
+		typedValue, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, fmt.Errorf("invalid element type in merging list: %v", slice)
+		}
+
+		mergeValue, ok := typedValue[mergeKey]
+		if !ok {
+			return nil, fmt.Errorf("cannot find merge key `%s` in merging list element: %v", mergeKey, typedValue)
+		}
+
+		result[fmt.Sprintf("%s", mergeValue)] = typedValue
+	}
+
+	return result, nil
+}
+
+func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) {
+	for key, leftValue := range typedLeft {
+		if rightValue, ok := typedRight[key]; ok {
+			if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts {
+				return true, err
+			}
+		}
+	}
+
+	return false, nil
+}
+
+// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration,
+// while preserving any changes or deletions made to the original configuration in the interim,
+// and not overridden by the current configuration. All three documents must be passed to the
+// method as json encoded content. It will return a strategic merge patch, or an error if any
+// of the documents is invalid, or if there are any preconditions that fail against the modified
+// configuration, or if overwrite is false and there are conflicts between the modified and current
+// configurations. Conflicts are defined as keys changed differently from original to modified
+// than from original to current. In other words, a conflict occurs if modified changes any key
+// in a way that is different from how it is changed in current (e.g., deleting it, changing its
+// value). We also propagate values of fields that do not exist in original but are explicitly
+// defined in modified.
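+//
+// A minimal sketch of the intended behavior (hypothetical JSON; schema
+// construction elided):
+//
+//	original := []byte(`{"name": "a", "value": "1"}`)
+//	modified := []byte(`{"name": "a"}`)
+//	current := []byte(`{"name": "a", "value": "1", "other": "x"}`)
+//	patch, err := CreateThreeWayMergePatch(original, modified, current, schema, false)
+//	// The patch deletes "value" (removed between original and modified)
+//	// and leaves "other", which only current defines, untouched.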
+func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+	originalMap := map[string]interface{}{}
+	if len(original) > 0 {
+		if err := json.Unmarshal(original, &originalMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	modifiedMap := map[string]interface{}{}
+	if len(modified) > 0 {
+		if err := json.Unmarshal(modified, &modifiedMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	currentMap := map[string]interface{}{}
+	if len(current) > 0 {
+		if err := json.Unmarshal(current, &currentMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	// The patch is the difference from current to modified without deletions, plus deletions
+	// from original to modified. To find it, we compute deletions, which are the deletions from
+	// original to modified, and delta, which is the difference from current to modified without
+	// deletions, and then apply delta to deletions as a patch, which should be strictly additive.
+	deltaMapDiffOptions := DiffOptions{
+		IgnoreDeletions: true,
+		SetElementOrder: true,
+	}
+	deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions)
+	if err != nil {
+		return nil, err
+	}
+	deletionsMapDiffOptions := DiffOptions{
+		SetElementOrder:           true,
+		IgnoreChangesAndAdditions: true,
+	}
+	deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	mergeOptions := MergeOptions{}
+	patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Apply the preconditions to the patch, and return an error if any of them fail.
+	for _, fn := range fns {
+		if !fn(patchMap) {
+			return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+		}
+	}
+
+	// If overwrite is false, and the patch contains any keys that were changed differently,
+	// then return a conflict error.
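+	// For example (hypothetical): if original sets replicas to 1, modified
+	// sets it to 2, and someone else changed current to 3, both the patch
+	// and the original-to-current diff touch "replicas" with different
+	// values, so a conflict error is returned unless overwrite is true.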
+	if !overwrite {
+		changeMapDiffOptions := DiffOptions{}
+		changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions)
+		if err != nil {
+			return nil, err
+		}
+
+		hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema)
+		if err != nil {
+			return nil, err
+		}
+
+		if hasConflicts {
+			return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap))
+		}
+	}
+
+	return json.Marshal(patchMap)
+}
+
+func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified }
+
+func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified }
+
+func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified }
+
+func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} {
+	return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective}
+}
+
+func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) {
+	typedOriginal, ok := original.(map[string]interface{})
+	if !ok {
+		return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
+	}
+	typedPatch, ok := patch.(map[string]interface{})
+	if !ok {
+		return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
+	}
+	return typedOriginal, typedPatch, nil
+}
+
+func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) {
+	typedOriginal, ok := original.([]interface{})
+	if !ok {
+		return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
+	}
+	typedPatch, ok := patch.([]interface{})
+	if !ok {
+		return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
+	}
+	return typedOriginal, typedPatch, nil
+}
+
+// extractRetainKeysPatchStrategy processes a patch strategy, which is a string
+// that may contain multiple patch strategies separated by ",". It returns a
+// boolean indicating whether the retainKeys strategy is present, and a string
+// holding the other strategy, if any.
+func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) {
+	switch len(strategies) {
+	case 0:
+		return false, "", nil
+	case 1:
+		singleStrategy := strategies[0]
+		switch singleStrategy {
+		case retainKeysStrategy:
+			return true, "", nil
+		default:
+			return false, singleStrategy, nil
+		}
+	case 2:
+		switch {
+		case strategies[0] == retainKeysStrategy:
+			return true, strategies[1], nil
+		case strategies[1] == retainKeysStrategy:
+			return true, strategies[0], nil
+		default:
+			return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
+		}
+	default:
+		return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
+	}
+}
+
+// hasAdditionalNewField returns true if the original map has a key with a
+// non-nil value that is absent from the modified map.
+func hasAdditionalNewField(original, modified map[string]interface{}) bool {
+	for k, v := range original {
+		if v == nil {
+			continue
+		}
+		if _, found := modified[k]; !found {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go
new file mode 100644
index 00000000..4721803c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go
@@ -0,0 +1,6763 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/sets" + sptest "k8s.io/apimachinery/pkg/util/strategicpatch/testing" +) + +var ( + fakeMergeItemSchema = sptest.Fake{Path: filepath.Join("testdata", "swagger-merge-item.json")} + fakePrecisionItemSchema = sptest.Fake{Path: filepath.Join("testdata", "swagger-precision-item.json")} +) + +type SortMergeListTestCases struct { + TestCases []SortMergeListTestCase +} + +type SortMergeListTestCase struct { + Description string + Original map[string]interface{} + Sorted map[string]interface{} +} + +type StrategicMergePatchTestCases struct { + TestCases []StrategicMergePatchTestCase +} + +type StrategicMergePatchTestCase struct { + Description string + StrategicMergePatchTestCaseData +} + +type StrategicMergePatchRawTestCase struct { + Description string + StrategicMergePatchRawTestCaseData +} + +type StrategicMergePatchTestCaseData struct { + // Original is the original object (last-applied config in annotation) + Original map[string]interface{} + // Modified is the modified object (new config we want) + Modified map[string]interface{} + // Current is the current object (live config in the server) + Current map[string]interface{} + // TwoWay is the expected two-way merge patch diff between original and modified + TwoWay map[string]interface{} + // ThreeWay is the expected three-way merge patch + ThreeWay map[string]interface{} + // Result is the expected object after applying the three-way patch on current object. + Result map[string]interface{} + // TwoWayResult is the expected object after applying the two-way patch on current object. + // If nil, Modified is used. + TwoWayResult map[string]interface{} +} + +// The meaning of each field is the same as StrategicMergePatchTestCaseData's. +// The difference is that all the fields in StrategicMergePatchRawTestCaseData are json-encoded data. 
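+//
+// For example (hypothetical): where the non-raw variant would hold
+// Original: map[string]interface{}{"name": "1"}, the raw variant holds the
+// encoded document directly, e.g. Original: []byte(`{"name": "1"}`).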
+type StrategicMergePatchRawTestCaseData struct { + Original []byte + Modified []byte + Current []byte + TwoWay []byte + ThreeWay []byte + Result []byte + TwoWayResult []byte + ExpectedError string +} + +type MergeItem struct { + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + Other string `json:"other,omitempty"` + MergingList []MergeItem `json:"mergingList,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + NonMergingList []MergeItem `json:"nonMergingList,omitempty"` + MergingIntList []int `json:"mergingIntList,omitempty" patchStrategy:"merge"` + NonMergingIntList []int `json:"nonMergingIntList,omitempty"` + MergeItemPtr *MergeItem `json:"mergeItemPtr,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + SimpleMap map[string]string `json:"simpleMap,omitempty"` + ReplacingItem runtime.RawExtension `json:"replacingItem,omitempty" patchStrategy:"replace"` + RetainKeysMap RetainKeysMergeItem `json:"retainKeysMap,omitempty" patchStrategy:"retainKeys"` + RetainKeysMergingList []MergeItem `json:"retainKeysMergingList,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name"` +} + +type RetainKeysMergeItem struct { + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + Other string `json:"other,omitempty"` + SimpleMap map[string]string `json:"simpleMap,omitempty"` + MergingIntList []int `json:"mergingIntList,omitempty" patchStrategy:"merge"` + MergingList []MergeItem `json:"mergingList,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + NonMergingList []MergeItem `json:"nonMergingList,omitempty"` +} + +var ( + mergeItem MergeItem + mergeItemStructSchema = PatchMetaFromStruct{T: GetTagStructTypeOrDie(mergeItem)} +) + +// These are test cases for SortMergeList, used to assert that it (recursively) +// sorts both merging and non merging lists correctly. 
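+//
+// For example (hypothetical, mirroring the data below): with patchMergeKey
+// "name" on mergingList, the document
+//	{mergingList: [{name: 2}, {name: 1}]}
+// must sort to
+//	{mergingList: [{name: 1}, {name: 2}]}.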
+var sortMergeListTestCaseData = []byte(` +testCases: + - description: sort one list of maps + original: + mergingList: + - name: 1 + - name: 3 + - name: 2 + sorted: + mergingList: + - name: 1 + - name: 2 + - name: 3 + - description: sort lists of maps but not nested lists of maps + original: + mergingList: + - name: 2 + nonMergingList: + - name: 1 + - name: 3 + - name: 2 + - name: 1 + nonMergingList: + - name: 2 + - name: 1 + sorted: + mergingList: + - name: 1 + nonMergingList: + - name: 2 + - name: 1 + - name: 2 + nonMergingList: + - name: 1 + - name: 3 + - name: 2 + - description: sort lists of maps and nested lists of maps + original: + mergingList: + - name: 2 + mergingList: + - name: 1 + - name: 3 + - name: 2 + - name: 1 + mergingList: + - name: 2 + - name: 1 + sorted: + mergingList: + - name: 1 + mergingList: + - name: 1 + - name: 2 + - name: 2 + mergingList: + - name: 1 + - name: 2 + - name: 3 + - description: merging list should NOT sort when nested in non merging list + original: + nonMergingList: + - name: 2 + mergingList: + - name: 1 + - name: 3 + - name: 2 + - name: 1 + mergingList: + - name: 2 + - name: 1 + sorted: + nonMergingList: + - name: 2 + mergingList: + - name: 1 + - name: 3 + - name: 2 + - name: 1 + mergingList: + - name: 2 + - name: 1 + - description: sort very nested list of maps + fieldTypes: + original: + mergingList: + - mergingList: + - mergingList: + - name: 2 + - name: 1 + sorted: + mergingList: + - mergingList: + - mergingList: + - name: 1 + - name: 2 + - description: sort nested lists of ints + original: + mergingList: + - name: 2 + mergingIntList: + - 1 + - 3 + - 2 + - name: 1 + mergingIntList: + - 2 + - 1 + sorted: + mergingList: + - name: 1 + mergingIntList: + - 1 + - 2 + - name: 2 + mergingIntList: + - 1 + - 2 + - 3 + - description: sort nested pointers of ints + original: + mergeItemPtr: + - name: 2 + mergingIntList: + - 1 + - 3 + - 2 + - name: 1 + mergingIntList: + - 2 + - 1 + sorted: + mergeItemPtr: + - name: 1 + mergingIntList: + - 1 + - 2 + - name: 2 + mergingIntList: + - 1 + - 2 + - 3 + - description: sort merging list by pointer + original: + mergeItemPtr: + - name: 1 + - name: 3 + - name: 2 + sorted: + mergeItemPtr: + - name: 1 + - name: 2 + - name: 3 +`) + +func TestSortMergeLists(t *testing.T) { + mergeItemOpenapiSchema := PatchMetaFromOpenAPI{ + Schema: sptest.GetSchemaOrDie(fakeMergeItemSchema, "mergeItem"), + } + schemas := []LookupPatchMeta{ + mergeItemStructSchema, + mergeItemOpenapiSchema, + } + + tc := SortMergeListTestCases{} + err := yaml.Unmarshal(sortMergeListTestCaseData, &tc) + if err != nil { + t.Errorf("can't unmarshal test cases: %s\n", err) + return + } + + for _, schema := range schemas { + for _, c := range tc.TestCases { + temp := testObjectToJSONOrFail(t, c.Original) + got := sortJsonOrFail(t, temp, c.Description, schema) + expected := testObjectToJSONOrFail(t, c.Sorted) + if !reflect.DeepEqual(got, expected) { + t.Errorf("using %s error in test case: %s\ncannot sort object:\n%s\nexpected:\n%s\ngot:\n%s\n", + getSchemaType(schema), c.Description, mergepatch.ToYAMLOrError(c.Original), mergepatch.ToYAMLOrError(c.Sorted), jsonToYAMLOrError(got)) + } + } + } +} + +// These are test cases for StrategicMergePatch that cannot be generated using +// CreateTwoWayMergePatch because it may be one of the following cases: +// - not use the replace directive. +// - generate duplicate integers for a merging list patch. +// - generate empty merging lists. +// - use patch format from an old client. 
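+//
+// For instance (hypothetical): a hand-written patch can clear a merging list
+// with the replace directive,
+//	mergingList:
+//	  - $patch: replace
+// whereas CreateTwoWayMergePatch would express the same change as a series of
+// delete directives instead.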
+var customStrategicMergePatchTestCaseData = []byte(` +testCases: + - description: unique scalars when merging lists + original: + mergingIntList: + - 1 + - 2 + twoWay: + mergingIntList: + - 2 + - 3 + modified: + mergingIntList: + - 1 + - 2 + - 3 + - description: delete map from nested map + original: + simpleMap: + key1: 1 + key2: 1 + twoWay: + simpleMap: + $patch: delete + modified: + simpleMap: + {} + - description: delete all items from merging list + original: + mergingList: + - name: 1 + - name: 2 + twoWay: + mergingList: + - $patch: replace + modified: + mergingList: [] + - description: merge empty merging lists + original: + mergingList: [] + twoWay: + mergingList: [] + modified: + mergingList: [] + - description: delete all keys from map + original: + name: 1 + value: 1 + twoWay: + $patch: replace + modified: {} + - description: add key and delete all keys from map + original: + name: 1 + value: 1 + twoWay: + other: a + $patch: replace + modified: + other: a + - description: delete all duplicate entries in a merging list + original: + mergingList: + - name: 1 + - name: 1 + - name: 2 + value: a + - name: 3 + - name: 3 + twoWay: + mergingList: + - name: 1 + $patch: delete + - name: 3 + $patch: delete + modified: + mergingList: + - name: 2 + value: a + - description: retainKeys map can add a field when no retainKeys directive present + original: + retainKeysMap: + name: foo + twoWay: + retainKeysMap: + value: bar + modified: + retainKeysMap: + name: foo + value: bar + - description: retainKeys map can change a field when no retainKeys directive present + original: + retainKeysMap: + name: foo + value: a + twoWay: + retainKeysMap: + value: b + modified: + retainKeysMap: + name: foo + value: b + - description: retainKeys map can delete a field when no retainKeys directive present + original: + retainKeysMap: + name: foo + value: a + twoWay: + retainKeysMap: + value: null + modified: + retainKeysMap: + name: foo + - description: retainKeys map merge an empty map + original: + retainKeysMap: + name: foo + value: a + twoWay: + retainKeysMap: {} + modified: + retainKeysMap: + name: foo + value: a + - description: retainKeys list can add a field when no retainKeys directive present + original: + retainKeysMergingList: + - name: bar + - name: foo + twoWay: + retainKeysMergingList: + - name: foo + value: a + modified: + retainKeysMergingList: + - name: bar + - name: foo + value: a + - description: retainKeys list can change a field when no retainKeys directive present + original: + retainKeysMergingList: + - name: bar + - name: foo + value: a + twoWay: + retainKeysMergingList: + - name: foo + value: b + modified: + retainKeysMergingList: + - name: bar + - name: foo + value: b + - description: retainKeys list can delete a field when no retainKeys directive present + original: + retainKeysMergingList: + - name: bar + - name: foo + value: a + twoWay: + retainKeysMergingList: + - name: foo + value: null + modified: + retainKeysMergingList: + - name: bar + - name: foo + - description: preserve the order from the patch in a merging list + original: + mergingList: + - name: 1 + - name: 2 + value: b + - name: 3 + twoWay: + mergingList: + - name: 3 + value: c + - name: 1 + value: a + - name: 2 + other: x + modified: + mergingList: + - name: 3 + value: c + - name: 1 + value: a + - name: 2 + value: b + other: x + - description: preserve the order from the patch in a merging list 2 + original: + mergingList: + - name: 1 + - name: 2 + value: b + - name: 3 + twoWay: + mergingList: + - name: 3 + value: c + 
- name: 1 + value: a + modified: + mergingList: + - name: 2 + value: b + - name: 3 + value: c + - name: 1 + value: a + - description: preserve the order from the patch in a merging int list + original: + mergingIntList: + - 1 + - 2 + - 3 + twoWay: + mergingIntList: + - 3 + - 1 + - 2 + modified: + mergingIntList: + - 3 + - 1 + - 2 + - description: preserve the order from the patch in a merging int list + original: + mergingIntList: + - 1 + - 2 + - 3 + twoWay: + mergingIntList: + - 3 + - 1 + modified: + mergingIntList: + - 2 + - 3 + - 1 +`) + +var customStrategicMergePatchRawTestCases = []StrategicMergePatchRawTestCase{ + { + Description: "$setElementOrder contains item that is not present in the list to be merged", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 3 + - name: 2 + - name: 1 +mergingList: + - name: 3 + value: 3 + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 3 + value: 3 + - name: 1 + value: 1 +`), + }, + }, + { + Description: "$setElementOrder contains item that is not present in the int list to be merged", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 3 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 3 + - 2 + - 1 +`), + Modified: []byte(` +mergingIntList: + - 3 + - 1 +`), + }, + }, + { + Description: "should check if order in $setElementOrder and patch list match", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 + - name: 3 +mergingList: + - name: 3 + value: 3 + - name: 1 + value: 1 +`), + ExpectedError: "doesn't match", + }, + }, + { + Description: "$setElementOrder contains item that is not present in the int list to be merged", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 3 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 + - 3 +mergingIntList: + - 3 + - 1 +`), + ExpectedError: "doesn't match", + }, + }, + { + Description: "missing merge key should error out", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: a +`), + TwoWay: []byte(` +mergingList: + - value: b +`), + ExpectedError: "does not contain declared merge key", + }, + }, +} + +func TestCustomStrategicMergePatch(t *testing.T) { + mergeItemOpenapiSchema := PatchMetaFromOpenAPI{ + Schema: sptest.GetSchemaOrDie(fakeMergeItemSchema, "mergeItem"), + } + schemas := []LookupPatchMeta{ + mergeItemStructSchema, + mergeItemOpenapiSchema, + } + + tc := StrategicMergePatchTestCases{} + err := yaml.Unmarshal(customStrategicMergePatchTestCaseData, &tc) + if err != nil { + t.Errorf("can't unmarshal test cases: %v\n", err) + return + } + + for _, schema := range schemas { + for _, c := range tc.TestCases { + original, expectedTwoWayPatch, _, expectedResult := twoWayTestCaseToJSONOrFail(t, c, schema) + testPatchApplication(t, original, expectedTwoWayPatch, expectedResult, c.Description, "", schema) + } + + for _, c := range customStrategicMergePatchRawTestCases { + original, expectedTwoWayPatch, _, expectedResult := twoWayRawTestCaseToJSONOrFail(t, c) + testPatchApplication(t, original, expectedTwoWayPatch, 
expectedResult, c.Description, c.ExpectedError, schema) + } + } +} + +// These are test cases for StrategicMergePatch, to assert that applying a patch +// yields the correct outcome. They are also test cases for CreateTwoWayMergePatch +// and CreateThreeWayMergePatch, to assert that they both generate the correct patch +// for the given set of input documents. +// +var createStrategicMergePatchTestCaseData = []byte(` +testCases: + - description: nil original + twoWay: + name: 1 + value: 1 + modified: + name: 1 + value: 1 + current: + name: 1 + other: a + threeWay: + value: 1 + result: + name: 1 + value: 1 + other: a + - description: nil patch + original: + name: 1 + twoWay: + {} + modified: + name: 1 + current: + name: 1 + threeWay: + {} + result: + name: 1 + - description: add field to map + original: + name: 1 + twoWay: + value: 1 + modified: + name: 1 + value: 1 + current: + name: 1 + other: a + threeWay: + value: 1 + result: + name: 1 + value: 1 + other: a + - description: add field to map with conflict + original: + name: 1 + twoWay: + value: 1 + modified: + name: 1 + value: 1 + current: + name: a + other: a + threeWay: + name: 1 + value: 1 + result: + name: 1 + value: 1 + other: a + - description: add field and delete field from map + original: + name: 1 + twoWay: + name: null + value: 1 + modified: + value: 1 + current: + name: 1 + other: a + threeWay: + name: null + value: 1 + result: + value: 1 + other: a + - description: add field and delete field from map with conflict + original: + name: 1 + twoWay: + name: null + value: 1 + modified: + value: 1 + current: + name: a + other: a + threeWay: + name: null + value: 1 + result: + value: 1 + other: a + - description: delete field from nested map + original: + simpleMap: + key1: 1 + key2: 1 + twoWay: + simpleMap: + key2: null + modified: + simpleMap: + key1: 1 + current: + simpleMap: + key1: 1 + key2: 1 + other: a + threeWay: + simpleMap: + key2: null + result: + simpleMap: + key1: 1 + other: a + - description: delete field from nested map with conflict + original: + simpleMap: + key1: 1 + key2: 1 + twoWay: + simpleMap: + key2: null + modified: + simpleMap: + key1: 1 + current: + simpleMap: + key1: a + key2: 1 + other: a + threeWay: + simpleMap: + key1: 1 + key2: null + result: + simpleMap: + key1: 1 + other: a + - description: delete all fields from map + original: + name: 1 + value: 1 + twoWay: + name: null + value: null + modified: {} + current: + name: 1 + value: 1 + other: a + threeWay: + name: null + value: null + result: + other: a + - description: delete all fields from map with conflict + original: + name: 1 + value: 1 + twoWay: + name: null + value: null + modified: {} + current: + name: 1 + value: a + other: a + threeWay: + name: null + value: null + result: + other: a + - description: add field and delete all fields from map + original: + name: 1 + value: 1 + twoWay: + name: null + value: null + other: a + modified: + other: a + current: + name: 1 + value: 1 + other: a + threeWay: + name: null + value: null + result: + other: a + - description: add field and delete all fields from map with conflict + original: + name: 1 + value: 1 + twoWay: + name: null + value: null + other: a + modified: + other: a + current: + name: 1 + value: 1 + other: b + threeWay: + name: null + value: null + other: a + result: + other: a + - description: replace list of scalars + original: + nonMergingIntList: + - 1 + - 2 + twoWay: + nonMergingIntList: + - 2 + - 3 + modified: + nonMergingIntList: + - 2 + - 3 + current: + nonMergingIntList: + - 1 + - 
2 + threeWay: + nonMergingIntList: + - 2 + - 3 + result: + nonMergingIntList: + - 2 + - 3 + - description: replace list of scalars with conflict + original: + nonMergingIntList: + - 1 + - 2 + twoWay: + nonMergingIntList: + - 2 + - 3 + modified: + nonMergingIntList: + - 2 + - 3 + current: + nonMergingIntList: + - 1 + - 4 + threeWay: + nonMergingIntList: + - 2 + - 3 + result: + nonMergingIntList: + - 2 + - 3 + - description: delete all maps from merging list + original: + mergingList: + - name: 1 + - name: 2 + twoWay: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + modified: + mergingList: [] + current: + mergingList: + - name: 1 + - name: 2 + threeWay: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + result: + mergingList: [] + - description: delete all maps from merging list with conflict + original: + mergingList: + - name: 1 + - name: 2 + twoWay: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + modified: + mergingList: [] + current: + mergingList: + - name: 1 + other: a + - name: 2 + other: b + threeWay: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + result: + mergingList: [] + - description: delete all maps from empty merging list + original: + mergingList: + - name: 1 + - name: 2 + twoWay: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + modified: + mergingList: [] + current: + mergingList: [] + threeWay: + mergingList: + - name: 1 + $patch: delete + - name: 2 + $patch: delete + result: + mergingList: [] + - description: merge empty merging lists + original: + mergingList: [] + twoWay: + {} + modified: + mergingList: [] + current: + mergingList: [] + threeWay: + {} + result: + mergingList: [] + - description: defined null values should propagate overwrite current fields (with conflict) + original: + name: 2 + twoWay: + name: 1 + value: 1 + other: null + twoWayResult: + name: 1 + value: 1 + modified: + name: 1 + value: 1 + other: null + current: + name: a + other: a + threeWay: + name: 1 + value: 1 + other: null + result: + name: 1 + value: 1 + - description: defined null values should propagate removing original fields + original: + name: original-name + value: original-value + current: + name: original-name + value: original-value + other: current-other + modified: + name: modified-name + value: null + twoWay: + name: modified-name + value: null + twoWayResult: + name: modified-name + threeWay: + name: modified-name + value: null + result: + name: modified-name + other: current-other + - description: nil patch with retainKeys map + original: + name: a + retainKeysMap: + name: foo + current: + name: a + value: b + retainKeysMap: + name: foo + modified: + name: a + retainKeysMap: + name: foo + twoWay: {} + threeWay: {} + result: + name: a + value: b + retainKeysMap: + name: foo + - description: retainKeys map with no change should not be present + original: + name: a + retainKeysMap: + name: foo + current: + name: a + other: c + retainKeysMap: + name: foo + modified: + name: a + value: b + retainKeysMap: + name: foo + twoWay: + value: b + threeWay: + value: b + result: + name: a + value: b + other: c + retainKeysMap: + name: foo +`) + +var strategicMergePatchRawTestCases = []StrategicMergePatchRawTestCase{ + { + Description: "delete items in lists of scalars", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 
+$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Modified: []byte(` +mergingIntList: + - 1 + - 2 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 3 + - 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Result: []byte(` +mergingIntList: + - 1 + - 2 + - 4 +`), + }, + }, + { + Description: "delete all duplicate items in lists of scalars", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 + - 3 + - 3 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Modified: []byte(` +mergingIntList: + - 1 + - 2 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 3 + - 3 + - 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Result: []byte(` +mergingIntList: + - 1 + - 2 + - 4 +`), + }, + }, + { + Description: "add and delete items in lists of scalars", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 + - 4 +$deleteFromPrimitiveList/mergingIntList: + - 3 +mergingIntList: + - 4 +`), + Modified: []byte(` +mergingIntList: + - 1 + - 2 + - 4 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 3 + - 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 + - 4 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Result: []byte(` +mergingIntList: + - 1 + - 2 + - 4 +`), + }, + }, + { + Description: "merge lists of maps", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 1 + - name: 2 + - name: 3 +mergingList: + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + Modified: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 1 + - name: 2 + value: 2 + - name: 3 + value: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 1 + - name: 2 + - name: 3 +mergingList: + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + Result: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 3 + value: 3 +`), + }, + }, + { + Description: "merge lists of maps with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 + - name: 3 +mergingList: + - name: 3 + value: 3 +`), + Modified: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 3 + value: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 3 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 + - name: 3 +mergingList: + - name: 2 + value: 2 + - name: 3 + value: 3 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 3 + value: 3 +`), + }, + }, + { + Description: "add field to map in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 
2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "add field to map in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "add field to map in merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 3 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Result: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + - name: 3 + value: 2 + other: b +`), + }, + }, + { + Description: "add duplicate field to map in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(`{}`), + Result: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "add an item that already exists in current object in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: a + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 + - name: 3 +mergingList: + - name: 3 +`), + Modified: []byte(` +mergingList: + - name: 1 + value: a + - name: 2 + - name: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + value: a + other: x + - name: 2 + - name: 3 +`), + ThreeWay: []byte(`{}`), + Result: []byte(` +mergingList: + - name: 1 + value: a + other: x + - name: 2 + - name: 3 +`), + }, + }, + { + Description: "add duplicate field to map in merging list with conflict", + StrategicMergePatchRawTestCaseData: 
StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 3 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 2 + value: 2 +`), + Result: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "replace map field value in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: a +`), + Modified: []byte(` +mergingList: + - name: 1 + value: a + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: a +`), + Result: []byte(` +mergingList: + - name: 1 + value: a + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "replace map field value in merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: a +`), + Modified: []byte(` +mergingList: + - name: 1 + value: a + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + value: 3 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: a +`), + Result: []byte(` +mergingList: + - name: 1 + value: a + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "delete map from merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 2 + other: b +`), + }, + }, + { + Description: "delete map from merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 2 + other: b +`), + }, + }, + { + Description: "delete missing map from merging list", + StrategicMergePatchRawTestCaseData: 
StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 2 + other: b +`), + }, + }, + { + Description: "delete missing map from merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 1 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 3 + other: a +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 +mergingList: + - name: 2 + - name: 1 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 2 + - name: 3 + other: a +`), + }, + }, + { + Description: "add map and delete map from merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 3 +mergingList: + - name: 3 + - name: 1 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 + - name: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + - name: 2 + other: b + - name: 4 + other: c +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 3 +mergingList: + - name: 3 + - name: 1 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 2 + other: b + - name: 4 + other: c + - name: 3 +`), + }, + }, + { + Description: "add map and delete map from merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 3 +mergingList: + - name: 3 + - name: 1 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 + - name: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 4 + other: c +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 3 +mergingList: + - name: 2 + - name: 3 + - name: 1 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 4 + other: c + - name: 2 + - name: 3 +`), + }, + }, + { + Description: "delete field from map in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Modified: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "delete field from map in merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` 
+mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Modified: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + value: a + other: a + - name: 2 + value: 2 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 +`), + }, + }, + { + Description: "delete missing field from map in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Modified: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "delete missing field from map in merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null +`), + Modified: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + value: null + - name: 2 + value: 2 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "replace non merging list nested in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + nonMergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + nonMergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + nonMergingList: + - name: 1 + value: 1 + - name: 2 + other: b +`), + }, + }, + { + Description: "replace non merging list nested in merging list with value conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + nonMergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 + - name: 2 +`), + 
Current: []byte(` +mergingList: + - name: 1 + other: a + nonMergingList: + - name: 1 + value: c + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + nonMergingList: + - name: 1 + value: 1 + - name: 2 + other: b +`), + }, + }, + { + Description: "replace non merging list nested in merging list with deletion conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + nonMergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + nonMergingList: + - name: 2 + value: 2 + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 1 + nonMergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + nonMergingList: + - name: 1 + value: 1 + - name: 2 + other: b +`), + }, + }, + { + Description: "add field to map in merging list nested in merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 1 + - name: 2 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 1 + - name: 2 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 + other: b +`), + }, + }, + { + Description: "add field to map in merging list nested in merging list with value conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 1 + - name: 2 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 1 + value: a + other: c + - name: 2 + value: b + other: d + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 1 + - name: 2 + name: 1 + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 1 + value: 1 + other: c + - name: 2 + 
value: 2 + other: d + - name: 2 + other: b +`), + }, + }, + { + Description: "add field to map in merging list nested in merging list with deletion conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 1 + - name: 2 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 2 + value: 2 + other: d + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 1 + - name: 2 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 1 + value: 1 + - name: 2 + value: 2 + other: d + - name: 2 + other: b +`), + }, + }, + + { + Description: "add field to map in merging list nested in merging list with deletion conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 2 + - name: 1 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergingList: + - name: 1 + mergingList: + - name: 2 + value: 2 + - name: 1 + value: 1 + - name: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 2 + value: 2 + other: d + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - $setElementOrder/mergingList: + - name: 2 + - name: 1 + name: 1 + mergingList: + - name: 1 + value: 1 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + mergingList: + - name: 2 + value: 2 + other: d + - name: 1 + value: 1 + - name: 2 + other: b +`), + }, + }, + { + Description: "add map to merging list by pointer", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergeItemPtr: + - name: 1 +`), + TwoWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - name: 2 +`), + Modified: []byte(` +mergeItemPtr: + - name: 1 + - name: 2 +`), + Current: []byte(` +mergeItemPtr: + - name: 1 + other: a + - name: 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - name: 2 +`), + Result: []byte(` +mergeItemPtr: + - name: 1 + other: a + - name: 2 + - name: 3 +`), + }, + }, + { + Description: "add map to merging list by pointer with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergeItemPtr: + - name: 1 +`), + TwoWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - name: 2 +`), + Modified: []byte(` +mergeItemPtr: + - name: 1 + - name: 2 +`), + Current: []byte(` +mergeItemPtr: + - name: 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - name: 1 + - name: 2 +`), + Result: []byte(` +mergeItemPtr: + - name: 1 + - name: 2 + - name: 3 +`), + }, + }, + 
{ + Description: "add field to map in merging list by pointer", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - $setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 + name: 1 + mergeItemPtr: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 +`), + Current: []byte(` +mergeItemPtr: + - name: 1 + other: a + mergeItemPtr: + - name: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - $setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 + name: 1 + mergeItemPtr: + - name: 1 + value: 1 +`), + Result: []byte(` +mergeItemPtr: + - name: 1 + other: a + mergeItemPtr: + - name: 1 + value: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 2 + other: b +`), + }, + }, + { + Description: "add field to map in merging list by pointer with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + - name: 2 + value: 2 + - name: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - $setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 + name: 1 + mergeItemPtr: + - name: 1 + value: 1 +`), + Modified: []byte(` +mergeItemPtr: + - name: 1 + mergeItemPtr: + - name: 1 + value: 1 + - name: 2 + value: 2 + - name: 2 +`), + Current: []byte(` +mergeItemPtr: + - name: 1 + other: a + mergeItemPtr: + - name: 1 + value: a + - name: 2 + value: 2 + other: b + - name: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 +mergeItemPtr: + - $setElementOrder/mergeItemPtr: + - name: 1 + - name: 2 + name: 1 + mergeItemPtr: + - name: 1 + value: 1 +`), + Result: []byte(` +mergeItemPtr: + - name: 1 + other: a + mergeItemPtr: + - name: 1 + value: 1 + - name: 2 + value: 2 + other: b + - name: 2 + other: b +`), + }, + }, + { + Description: "merge lists of scalars", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: +- 1 +- 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: +- 1 +- 2 +- 3 +mergingIntList: +- 3 +`), + Modified: []byte(` +mergingIntList: +- 1 +- 2 +- 3 +`), + Current: []byte(` +mergingIntList: +- 1 +- 2 +- 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: +- 1 +- 2 +- 3 +mergingIntList: +- 3 +`), + Result: []byte(` +mergingIntList: +- 1 +- 2 +- 3 +- 4 +`), + }, + }, + { + Description: "add duplicate field to map in merging int list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 2 + - 3 +mergingIntList: + - 3 +`), + Modified: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + ThreeWay: []byte(`{}`), + Result: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + }, + }, + // test case for setElementOrder + { + Description: "add an item in a list of primitives and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: +- 1 +- 
2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: +- 3 +- 1 +- 2 +mergingIntList: +- 3 +`), + Modified: []byte(` +mergingIntList: +- 3 +- 1 +- 2 +`), + Current: []byte(` +mergingIntList: +- 1 +- 4 +- 2 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: +- 3 +- 1 +- 2 +mergingIntList: +- 3 +`), + Result: []byte(` +mergingIntList: +- 3 +- 1 +- 4 +- 2 +`), + }, + }, + { + Description: "delete an item in a list of primitives and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: +- 1 +- 2 +- 3 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: +- 2 +- 1 +$deleteFromPrimitiveList/mergingIntList: +- 3 +`), + Modified: []byte(` +mergingIntList: +- 2 +- 1 +`), + Current: []byte(` +mergingIntList: +- 1 +- 2 +- 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: +- 2 +- 1 +$deleteFromPrimitiveList/mergingIntList: +- 3 +`), + Result: []byte(` +mergingIntList: +- 2 +- 1 +`), + }, + }, + { + Description: "add an item in a list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 3 + - name: 1 + - name: 2 +mergingList: + - name: 3 + value: 3 +`), + Modified: []byte(` +mergingList: + - name: 3 + value: 3 + - name: 1 + - name: 2 + value: 2 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 3 + - name: 1 + - name: 2 +mergingList: + - name: 3 + value: 3 +`), + Result: []byte(` +mergingList: + - name: 3 + value: 3 + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + }, + }, + { + Description: "add multiple items in a list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 4 + - name: 2 + - name: 3 +mergingList: + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + Modified: []byte(` +mergingList: + - name: 1 + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 3 + value: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 4 + - name: 2 + - name: 3 +mergingList: + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + Result: []byte(` +mergingList: + - name: 1 + other: a + - name: 4 + value: 4 + - name: 2 + value: 2 + other: b + - name: 3 + value: 3 +`), + }, + }, + { + Description: "delete an item in a list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 + value: 3 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 1 +mergingList: + - name: 3 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 2 + value: 2 + - name: 1 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 3 + value: 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 1 +mergingList: + - name: 3 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 2 + value: 2 + other: b + - name: 1 + other: a +`), + }, + }, + { + Description: "change an item in a list and preserve 
order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 + value: 3 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 3 + - name: 1 +mergingList: + - name: 3 + value: x +`), + Modified: []byte(` +mergingList: + - name: 2 + value: 2 + - name: 3 + value: x + - name: 1 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 3 + value: 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 3 + - name: 1 +mergingList: + - name: 3 + value: x +`), + Result: []byte(` +mergingList: + - name: 2 + value: 2 + other: b + - name: 3 + value: x + - name: 1 + other: a +`), + }, + }, + { + Description: "add and delete an item in a list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 + value: 3 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 2 + - name: 1 +mergingList: + - name: 4 + value: 4 + - name: 3 + $patch: delete +`), + Modified: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 1 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + other: b + - name: 3 + value: 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 2 + - name: 1 +mergingList: + - name: 4 + value: 4 + - name: 3 + $patch: delete +`), + Result: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 2 + value: 2 + other: b + - name: 1 + other: a +`), + }, + }, + { + Description: "set elements order in a list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 + value: 3 + - name: 4 + value: 4 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 2 + - name: 3 + - name: 1 +`), + Modified: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 3 + value: 3 + - name: 1 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 3 + value: 3 + - name: 4 + value: 4 + - name: 2 + value: 2 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 2 + - name: 3 + - name: 1 +`), + Result: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 3 + value: 3 + - name: 1 + other: a +`), + }, + }, + { + Description: "set elements order in a list with server-only items", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 3 + value: 3 + - name: 4 + value: 4 + - name: 2 + value: 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 2 + - name: 3 + - name: 1 +`), + Modified: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 3 + value: 3 + - name: 1 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 3 + value: 3 + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 9 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 4 + - name: 2 + - name: 3 + - name: 1 +`), + Result: []byte(` +mergingList: + - name: 4 + value: 4 + - name: 2 + value: 2 + - name: 3 + value: 3 + - name: 1 + other: a + - name: 9 +`), + }, + }, + { + Description: "set elements order in a list with server-only items 2", + StrategicMergePatchRawTestCaseData: 
StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 3 + value: 3 + - name: 4 + value: 4 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 1 + - name: 4 + - name: 3 +`), + Modified: []byte(` +mergingList: + - name: 2 + value: 2 + - name: 1 + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + - name: 9 + - name: 3 + value: 3 + - name: 4 + value: 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 1 + - name: 4 + - name: 3 +`), + Result: []byte(` +mergingList: + - name: 2 + value: 2 + - name: 1 + other: a + - name: 9 + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + }, + }, + { + Description: "set elements order in a list with server-only items 3", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: + - name: 1 + - name: 2 + value: 2 + - name: 3 + value: 3 + - name: 4 + value: 4 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 1 + - name: 4 + - name: 3 +`), + Modified: []byte(` +mergingList: + - name: 2 + value: 2 + - name: 1 + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + Current: []byte(` +mergingList: + - name: 1 + other: a + - name: 2 + value: 2 + - name: 7 + - name: 9 + - name: 8 + - name: 3 + value: 3 + - name: 4 + value: 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 2 + - name: 1 + - name: 4 + - name: 3 +`), + Result: []byte(` +mergingList: + - name: 2 + value: 2 + - name: 1 + other: a + - name: 7 + - name: 9 + - name: 8 + - name: 4 + value: 4 + - name: 3 + value: 3 +`), + }, + }, + { + Description: "add an item in an int list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 3 + - 1 + - 2 +mergingIntList: + - 3 +`), + Modified: []byte(` +mergingIntList: + - 3 + - 1 + - 2 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 3 + - 1 + - 2 +mergingIntList: + - 3 +`), + Result: []byte(` +mergingIntList: + - 3 + - 1 + - 2 +`), + }, + }, + { + Description: "add multiple items in an int list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 4 + - 2 + - 3 +mergingIntList: + - 4 + - 3 +`), + Modified: []byte(` +mergingIntList: + - 1 + - 4 + - 2 + - 3 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 1 + - 4 + - 2 + - 3 +mergingIntList: + - 4 + - 3 +`), + Result: []byte(` +mergingIntList: + - 1 + - 4 + - 2 + - 3 +`), + }, + }, + { + Description: "delete an item in an int list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 3 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 2 + - 1 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Modified: []byte(` +mergingIntList: + - 2 + - 1 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 2 + - 1 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Result: []byte(` +mergingIntList: + - 2 + - 1 +`), + }, + }, + { + 
Description: "add and delete an item in an int list and preserve order", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 3 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 4 + - 2 + - 1 +mergingIntList: + - 4 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Modified: []byte(` +mergingIntList: + - 4 + - 2 + - 1 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 4 + - 2 + - 1 +mergingIntList: + - 4 +$deleteFromPrimitiveList/mergingIntList: + - 3 +`), + Result: []byte(` +mergingIntList: + - 4 + - 2 + - 1 +`), + }, + }, + { + Description: "set elements order in an int list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 3 + - 4 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + Modified: []byte(` +mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + Current: []byte(` +mergingIntList: + - 1 + - 3 + - 4 + - 2 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + Result: []byte(` +mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + }, + }, + { + Description: "set elements order in an int list with server-only items", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 3 + - 4 + - 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + Modified: []byte(` +mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + Current: []byte(` +mergingIntList: + - 1 + - 3 + - 4 + - 2 + - 9 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 4 + - 2 + - 3 + - 1 +`), + Result: []byte(` +mergingIntList: + - 4 + - 2 + - 3 + - 1 + - 9 +`), + }, + }, + { + Description: "set elements order in an int list with server-only items 2", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 + - 3 + - 4 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 2 + - 1 + - 4 + - 3 +`), + Modified: []byte(` +mergingIntList: + - 2 + - 1 + - 4 + - 3 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 9 + - 3 + - 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 2 + - 1 + - 4 + - 3 +`), + Result: []byte(` +mergingIntList: + - 2 + - 1 + - 9 + - 4 + - 3 +`), + }, + }, + { + Description: "set elements order in an int list with server-only items 3", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: + - 1 + - 2 + - 3 + - 4 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: + - 2 + - 1 + - 4 + - 3 +`), + Modified: []byte(` +mergingIntList: + - 2 + - 1 + - 4 + - 3 +`), + Current: []byte(` +mergingIntList: + - 1 + - 2 + - 7 + - 9 + - 8 + - 3 + - 4 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: + - 2 + - 1 + - 4 + - 3 +`), + Result: []byte(` +mergingIntList: + - 2 + - 1 + - 7 + - 9 + - 8 + - 4 + - 3 +`), + }, + }, + { + // This test case is used just to demonstrate the behavior when dealing with a list with duplicates + Description: "behavior of set element order for a merging list with duplicates", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: +- name: 1 +- name: 2 + value: dup1 +- name: 3 +- name: 2 + value: dup2 +- name: 4 +`), + Current: []byte(` +mergingList: +- name: 1 +- name: 2 + value: 
dup1 +- name: 3 +- name: 2 + value: dup2 +- name: 4 +`), + Modified: []byte(` +mergingList: +- name: 2 + value: dup1 +- name: 1 +- name: 4 +- name: 3 +- name: 2 + value: dup2 +`), + TwoWay: []byte(` +$setElementOrder/mergingList: +- name: 2 +- name: 1 +- name: 4 +- name: 3 +- name: 2 +`), + TwoWayResult: []byte(` +mergingList: +- name: 2 + value: dup1 +- name: 2 + value: dup2 +- name: 1 +- name: 4 +- name: 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: +- name: 2 +- name: 1 +- name: 4 +- name: 3 +- name: 2 +`), + Result: []byte(` +mergingList: +- name: 2 + value: dup1 +- name: 2 + value: dup2 +- name: 1 +- name: 4 +- name: 3 +`), + }, + }, + { + // This test case is used just to demonstrate the behavior when dealing with a list with duplicates + Description: "behavior of set element order for a merging int list with duplicates", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingIntList: +- 1 +- 2 +- 3 +- 2 +- 4 +`), + Current: []byte(` +mergingIntList: +- 1 +- 2 +- 3 +- 2 +- 4 +`), + Modified: []byte(` +mergingIntList: +- 2 +- 1 +- 4 +- 3 +- 2 +`), + TwoWay: []byte(` +$setElementOrder/mergingIntList: +- 2 +- 1 +- 4 +- 3 +- 2 +`), + TwoWayResult: []byte(` +mergingIntList: +- 2 +- 2 +- 1 +- 4 +- 3 +`), + ThreeWay: []byte(` +$setElementOrder/mergingIntList: +- 2 +- 1 +- 4 +- 3 +- 2 +`), + Result: []byte(` +mergingIntList: +- 2 +- 2 +- 1 +- 4 +- 3 +`), + }, + }, + { + Description: "retainKeys map should clear defaulted field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(`{}`), + Current: []byte(` +retainKeysMap: + value: foo +`), + Modified: []byte(` +retainKeysMap: + other: bar +`), + TwoWay: []byte(` +retainKeysMap: + other: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - other + other: bar +`), + Result: []byte(` +retainKeysMap: + other: bar +`), + }, + }, + { + Description: "retainKeys map should clear defaulted field with conflict (discriminated union)", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(`{}`), + Current: []byte(` +retainKeysMap: + name: type1 + value: foo +`), + Modified: []byte(` +retainKeysMap: + name: type2 + other: bar +`), + TwoWay: []byte(` +retainKeysMap: + name: type2 + other: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - other + name: type2 + other: bar +`), + Result: []byte(` +retainKeysMap: + name: type2 + other: bar +`), + }, + }, + { + Description: "retainKeys map adds a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo +`), + Current: []byte(` +retainKeysMap: + name: foo +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar +`), + }, + }, + { + Description: "retainKeys map adds a field and clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo +`), + Current: []byte(` +retainKeysMap: + name: foo + other: a +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar +`), + 
Result: []byte(` +retainKeysMap: + name: foo + value: bar +`), + }, + }, + { + Description: "retainKeys map deletes a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar +`), + Modified: []byte(` +retainKeysMap: + name: foo +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + value: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + value: null +`), + Result: []byte(` +retainKeysMap: + name: foo +`), + }, + }, + { + Description: "retainKeys map deletes a field and clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + other: a +`), + Modified: []byte(` +retainKeysMap: + name: foo +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + value: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + value: null +`), + Result: []byte(` +retainKeysMap: + name: foo +`), + }, + }, + { + Description: "retainKeys map clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + other: a +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar +`), + TwoWay: []byte(`{}`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar +`), + }, + }, + { + Description: "retainKeys map nested map with no change", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + simpleMap: + key1: a +`), + Current: []byte(` +retainKeysMap: + name: foo + simpleMap: + key1: a +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + value: bar +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a +`), + }, + }, + { + Description: "retainKeys map adds a field in a nested map", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key3: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key2: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key2: b +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key2: b +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key2: b + key3: c +`), + }, + }, + { + Description: "retainKeys map deletes a field in a nested map", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key2: b +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key2: b + key3: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + 
simpleMap: + key1: a +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key2: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key2: null +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key3: c +`), + }, + }, + { + Description: "retainKeys map changes a field in a nested map", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key2: b +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: a + key2: b + key3: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: x + key2: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key1: x +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key1: x +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: x + key2: b + key3: c +`), + }, + }, + { + Description: "retainKeys map changes a field in a nested map with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: old + key2: b +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: new + key2: b + key3: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: modified + key2: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key1: modified +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - simpleMap + - value + simpleMap: + key1: modified +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + simpleMap: + key1: modified + key2: b + key3: c +`), + }, + }, + { + Description: "retainKeys map replaces non-merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: b +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: c + - name: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - nonMergingList + - value + nonMergingList: + - name: a + - name: c + - name: b +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - nonMergingList + - value + nonMergingList: + - name: a + - name: c + - name: b +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: c + - name: b +`), + }, + }, + { + Description: "retainKeys map nested non-merging list with no change", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - nonMergingList + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - nonMergingList + - value + value: 
bar +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: b +`), + }, + }, + { + Description: "retainKeys map nested non-merging list with no change with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b + - name: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - nonMergingList + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - nonMergingList + - value + value: bar + nonMergingList: + - name: a + - name: b +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + nonMergingList: + - name: a + - name: b +`), + }, + }, + { + Description: "retainKeys map deletes nested non-merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar + nonMergingList: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar + nonMergingList: null +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar +`), + }, + }, + { + Description: "retainKeys map delete nested non-merging list with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + nonMergingList: + - name: a + - name: b + - name: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar + nonMergingList: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar + nonMergingList: null +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar +`), + }, + }, + { + Description: "retainKeys map nested merging int list with no change", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + mergingIntList: + - 1 + - 2 +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingIntList + - name + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingIntList + - name + - value + $setElementOrder/mergingIntList: + - 1 + - 2 + value: bar +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + mergingIntList: + - 1 + - 2 + - 3 +`), + }, + }, + { + Description: "retainKeys map adds an item in nested merging int list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 +`), + Modified: []byte(` +retainKeysMap: + 
name: foo + mergingIntList: + - 1 + - 2 + - 4 +`), + TwoWay: []byte(` +retainKeysMap: + $setElementOrder/mergingIntList: + - 1 + - 2 + - 4 + $retainKeys: + - mergingIntList + - name + mergingIntList: + - 4 +`), + ThreeWay: []byte(` +retainKeysMap: + $setElementOrder/mergingIntList: + - 1 + - 2 + - 4 + $retainKeys: + - mergingIntList + - name + mergingIntList: + - 4 +`), + Result: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 4 + - 3 +`), + }, + }, + { + Description: "retainKeys map deletes an item in nested merging int list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 + - 4 +`), + Modified: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 3 +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingIntList + - name + $deleteFromPrimitiveList/mergingIntList: + - 2 + $setElementOrder/mergingIntList: + - 1 + - 3 +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingIntList + - name + $deleteFromPrimitiveList/mergingIntList: + - 2 + $setElementOrder/mergingIntList: + - 1 + - 3 +`), + Result: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 3 + - 4 +`), + }, + }, + { + Description: "retainKeys map adds an item and deletes an item in nested merging int list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 + - 4 +`), + Modified: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 3 + - 5 +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingIntList + - name + mergingIntList: + - 5 + $deleteFromPrimitiveList/mergingIntList: + - 2 + $setElementOrder/mergingIntList: + - 1 + - 3 + - 5 +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingIntList + - name + mergingIntList: + - 5 + $deleteFromPrimitiveList/mergingIntList: + - 2 + $setElementOrder/mergingIntList: + - 1 + - 3 + - 5 +`), + Result: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 3 + - 5 + - 4 +`), + }, + }, + { + Description: "retainKeys map deletes nested merging int list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingIntList: + - 1 + - 2 + - 3 +`), + Modified: []byte(` +retainKeysMap: + name: foo +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + mergingIntList: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + mergingIntList: null +`), + Result: []byte(` +retainKeysMap: + name: foo +`), + }, + }, + { + Description: "retainKeys map nested merging list with no change", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + - name: c +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar + mergingList: + - name: a + - name: b +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + - value + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - 
mergingList + - name + - value + $setElementOrder/mergingList: + - name: a + - name: b + value: bar +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar + mergingList: + - name: a + - name: b + - name: c +`), + }, + }, + { + Description: "retainKeys map adds an item in nested merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + - name: x +`), + Modified: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + - name: c +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + $setElementOrder/mergingList: + - name: a + - name: b + - name: c + mergingList: + - name: c +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + $setElementOrder/mergingList: + - name: a + - name: b + - name: c + mergingList: + - name: c +`), + Result: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + - name: c + - name: x +`), + }, + }, + { + Description: "retainKeys map changes an item in nested merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + value: foo +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + value: foo + - name: x +`), + Modified: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + value: bar +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + $setElementOrder/mergingList: + - name: a + - name: b + mergingList: + - name: b + value: bar +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + $setElementOrder/mergingList: + - name: a + - name: b + mergingList: + - name: b + value: bar +`), + Result: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + value: bar + - name: x +`), + }, + }, + { + Description: "retainKeys map deletes nested merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b +`), + Modified: []byte(` +retainKeysMap: + name: foo + value: bar +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar + mergingList: null +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - name + - value + value: bar + mergingList: null +`), + Result: []byte(` +retainKeysMap: + name: foo + value: bar +`), + }, + }, + { + Description: "retainKeys map deletes an item in nested merging list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b +`), + Current: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: b + - name: x +`), + Modified: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a +`), + TwoWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + $setElementOrder/mergingList: + - name: a + mergingList: + - name: b + $patch: delete +`), + ThreeWay: []byte(` +retainKeysMap: + $retainKeys: + - mergingList + - name + $setElementOrder/mergingList: + - name: a + mergingList: + - name: b + $patch: 
delete +`), + Result: []byte(` +retainKeysMap: + name: foo + mergingList: + - name: a + - name: x +`), + }, + }, + { + Description: "retainKeys list of maps clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a + other: x +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + TwoWay: []byte(`{}`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + }, + }, + { + Description: "retainKeys list of maps clears a field with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: old +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: new + other: x +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: modified +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: modified +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: modified +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: modified +`), + }, + }, + { + Description: "retainKeys list of maps changes a field and clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: old +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: old + other: x +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: new +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: new +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: new +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: new +`), + }, + }, + { + Description: "retainKeys list of maps changes a field and clears a field with conflict", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: old +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: modified + other: x +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: new +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: new +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: new +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: new +`), + }, + }, + { + Description: "retainKeys list of maps adds a field", + 
StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: a +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: a +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + }, + }, + { + Description: "retainKeys list of maps adds a field and clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + other: x +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: a +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + - value + name: foo + value: a +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + }, + }, + { + Description: "retainKeys list of maps deletes a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + name: foo + value: null +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + name: foo + value: null +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + }, + }, + { + Description: "retainKeys list of maps deletes a field and clears a field", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a +`), + Current: []byte(` +retainKeysMergingList: +- name: bar +- name: foo + value: a + other: x +`), + Modified: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + TwoWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + name: foo + value: null +`), + ThreeWay: []byte(` +$setElementOrder/retainKeysMergingList: + - name: bar + - name: foo +retainKeysMergingList: +- $retainKeys: + - name + name: foo + value: null +`), + Result: []byte(` +retainKeysMergingList: +- name: bar +- name: foo +`), + }, + }, + { + Description: "delete and reorder in one list, reorder in another", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: +- name: a + value: a +- name: b + value: b +mergeItemPtr: +- name: c + value: c +- name: d + value: d +`), + Current: 
[]byte(` +mergingList: +- name: a + value: a +- name: b + value: b +mergeItemPtr: +- name: c + value: c +- name: d + value: d +`), + Modified: []byte(` +mergingList: +- name: b + value: b +mergeItemPtr: +- name: d + value: d +- name: c + value: c +`), + TwoWay: []byte(` +$setElementOrder/mergingList: +- name: b +$setElementOrder/mergeItemPtr: +- name: d +- name: c +mergingList: +- $patch: delete + name: a +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: +- name: b +$setElementOrder/mergeItemPtr: +- name: d +- name: c +mergingList: +- $patch: delete + name: a +`), + Result: []byte(` +mergingList: +- name: b + value: b +mergeItemPtr: +- name: d + value: d +- name: c + value: c +`), + }, + }, +} + +func TestStrategicMergePatch(t *testing.T) { + testStrategicMergePatchWithCustomArgumentsUsingStruct(t, "bad struct", + "{}", "{}", []byte(""), mergepatch.ErrBadArgKind(struct{}{}, []byte{})) + + mergeItemOpenapiSchema := PatchMetaFromOpenAPI{ + Schema: sptest.GetSchemaOrDie(fakeMergeItemSchema, "mergeItem"), + } + schemas := []LookupPatchMeta{ + mergeItemStructSchema, + mergeItemOpenapiSchema, + } + + tc := StrategicMergePatchTestCases{} + err := yaml.Unmarshal(createStrategicMergePatchTestCaseData, &tc) + if err != nil { + t.Errorf("can't unmarshal test cases: %s\n", err) + return + } + + for _, schema := range schemas { + testStrategicMergePatchWithCustomArguments(t, "bad original", + "", "{}", schema, mergepatch.ErrBadJSONDoc) + testStrategicMergePatchWithCustomArguments(t, "bad patch", + "{}", "", schema, mergepatch.ErrBadJSONDoc) + testStrategicMergePatchWithCustomArguments(t, "nil struct", + "{}", "{}", nil, mergepatch.ErrBadArgKind(struct{}{}, nil)) + + for _, c := range tc.TestCases { + testTwoWayPatch(t, c, schema) + testThreeWayPatch(t, c, schema) + } + + // run multiple times to exercise different map traversal orders + for i := 0; i < 10; i++ { + for _, c := range strategicMergePatchRawTestCases { + testTwoWayPatchForRawTestCase(t, c, schema) + testThreeWayPatchForRawTestCase(t, c, schema) + } + } + } +} + +func testStrategicMergePatchWithCustomArgumentsUsingStruct(t *testing.T, description, original, patch string, dataStruct interface{}, expected error) { + schema, actual := NewPatchMetaFromStruct(dataStruct) + // If actual is not nil, check error. If errors match, return. 
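+ // NewPatchMetaFromStruct is expected to reject a dataStruct that is not a
+ // struct kind (the "bad struct" case above passes []byte("")), in which case
+ // only the construction error is compared and no patch is created or applied.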
+ if actual != nil { + checkErrorsEqual(t, description, expected, actual, schema) + return + } + testStrategicMergePatchWithCustomArguments(t, description, original, patch, schema, expected) +} + +func testStrategicMergePatchWithCustomArguments(t *testing.T, description, original, patch string, schema LookupPatchMeta, expected error) { + _, actual := StrategicMergePatch([]byte(original), []byte(patch), schema) + checkErrorsEqual(t, description, expected, actual, schema) +} + +func checkErrorsEqual(t *testing.T, description string, expected, actual error, schema LookupPatchMeta) { + if actual != expected { + if actual == nil { + t.Errorf("using %s expected error: %s\ndid not occur in test case: %s", getSchemaType(schema), expected, description) + return + } + + if expected == nil || actual.Error() != expected.Error() { + t.Errorf("using %s unexpected error: %s\noccurred in test case: %s", getSchemaType(schema), actual, description) + return + } + } +} + +func testTwoWayPatch(t *testing.T, c StrategicMergePatchTestCase, schema LookupPatchMeta) { + original, expectedPatch, modified, expectedResult := twoWayTestCaseToJSONOrFail(t, c, schema) + + actualPatch, err := CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema) + if err != nil { + t.Errorf("using %s error: %s\nin test case: %s\ncannot create two way patch: %s:\n%s\n", + getSchemaType(schema), err, c.Description, original, mergepatch.ToYAMLOrError(c.StrategicMergePatchTestCaseData)) + return + } + + testPatchCreation(t, expectedPatch, actualPatch, c.Description) + testPatchApplication(t, original, actualPatch, expectedResult, c.Description, "", schema) +} + +func testTwoWayPatchForRawTestCase(t *testing.T, c StrategicMergePatchRawTestCase, schema LookupPatchMeta) { + original, expectedPatch, modified, expectedResult := twoWayRawTestCaseToJSONOrFail(t, c) + + actualPatch, err := CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema) + if err != nil { + t.Errorf("error: %s\nin test case: %s\ncannot create two way patch:\noriginal:%s\ntwoWay:%s\nmodified:%s\ncurrent:%s\nthreeWay:%s\nresult:%s\n", + err, c.Description, c.Original, c.TwoWay, c.Modified, c.Current, c.ThreeWay, c.Result) + return + } + + testPatchCreation(t, expectedPatch, actualPatch, c.Description) + testPatchApplication(t, original, actualPatch, expectedResult, c.Description, c.ExpectedError, schema) +} + +func twoWayTestCaseToJSONOrFail(t *testing.T, c StrategicMergePatchTestCase, schema LookupPatchMeta) ([]byte, []byte, []byte, []byte) { + expectedResult := c.TwoWayResult + if expectedResult == nil { + expectedResult = c.Modified + } + return sortJsonOrFail(t, testObjectToJSONOrFail(t, c.Original), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, c.TwoWay), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, c.Modified), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, expectedResult), c.Description, schema) +} + +func twoWayRawTestCaseToJSONOrFail(t *testing.T, c StrategicMergePatchRawTestCase) ([]byte, []byte, []byte, []byte) { + expectedResult := c.TwoWayResult + if expectedResult == nil { + expectedResult = c.Modified + } + return yamlToJSONOrError(t, c.Original), + yamlToJSONOrError(t, c.TwoWay), + yamlToJSONOrError(t, c.Modified), + yamlToJSONOrError(t, expectedResult) +} + +func testThreeWayPatch(t *testing.T, c StrategicMergePatchTestCase, schema LookupPatchMeta) { + original, modified, current, expected, result := threeWayTestCaseToJSONOrFail(t, c, schema) + actual, err := 
CreateThreeWayMergePatch(original, modified, current, schema, false) + if err != nil { + if !mergepatch.IsConflict(err) { + t.Errorf("using %s error: %s\nin test case: %s\ncannot create three way patch:\n%s\n", + getSchemaType(schema), err, c.Description, mergepatch.ToYAMLOrError(c.StrategicMergePatchTestCaseData)) + return + } + + if !strings.Contains(c.Description, "conflict") { + t.Errorf("using %s unexpected conflict: %s\nin test case: %s\ncannot create three way patch:\n%s\n", + getSchemaType(schema), err, c.Description, mergepatch.ToYAMLOrError(c.StrategicMergePatchTestCaseData)) + return + } + + if len(c.Result) > 0 { + actual, err := CreateThreeWayMergePatch(original, modified, current, schema, true) + if err != nil { + t.Errorf("using %s error: %s\nin test case: %s\ncannot force three way patch application:\n%s\n", + getSchemaType(schema), err, c.Description, mergepatch.ToYAMLOrError(c.StrategicMergePatchTestCaseData)) + return + } + + testPatchCreation(t, expected, actual, c.Description) + testPatchApplication(t, current, actual, result, c.Description, "", schema) + } + + return + } + + if strings.Contains(c.Description, "conflict") || len(c.Result) < 1 { + t.Errorf("using %s error in test case: %s\nexpected conflict did not occur:\n%s\n", + getSchemaType(schema), c.Description, mergepatch.ToYAMLOrError(c.StrategicMergePatchTestCaseData)) + return + } + + testPatchCreation(t, expected, actual, c.Description) + testPatchApplication(t, current, actual, result, c.Description, "", schema) +} + +func testThreeWayPatchForRawTestCase(t *testing.T, c StrategicMergePatchRawTestCase, schema LookupPatchMeta) { + original, modified, current, expected, result := threeWayRawTestCaseToJSONOrFail(t, c) + actual, err := CreateThreeWayMergePatch(original, modified, current, schema, false) + if err != nil { + if !mergepatch.IsConflict(err) { + t.Errorf("using %s error: %s\nin test case: %s\ncannot create three way patch:\noriginal:%s\ntwoWay:%s\nmodified:%s\ncurrent:%s\nthreeWay:%s\nresult:%s\n", + getSchemaType(schema), err, c.Description, c.Original, c.TwoWay, c.Modified, c.Current, c.ThreeWay, c.Result) + return + } + + if !strings.Contains(c.Description, "conflict") { + t.Errorf("using %s unexpected conflict: %s\nin test case: %s\ncannot create three way patch:\noriginal:%s\ntwoWay:%s\nmodified:%s\ncurrent:%s\nthreeWay:%s\nresult:%s\n", + getSchemaType(schema), err, c.Description, c.Original, c.TwoWay, c.Modified, c.Current, c.ThreeWay, c.Result) + return + } + + if len(c.Result) > 0 { + actual, err := CreateThreeWayMergePatch(original, modified, current, schema, true) + if err != nil { + t.Errorf("using %s error: %s\nin test case: %s\ncannot force three way patch application:\noriginal:%s\ntwoWay:%s\nmodified:%s\ncurrent:%s\nthreeWay:%s\nresult:%s\n", + getSchemaType(schema), err, c.Description, c.Original, c.TwoWay, c.Modified, c.Current, c.ThreeWay, c.Result) + return + } + + testPatchCreation(t, expected, actual, c.Description) + testPatchApplication(t, current, actual, result, c.Description, c.ExpectedError, schema) + } + + return + } + + if strings.Contains(c.Description, "conflict") || len(c.Result) < 1 { + t.Errorf("using %s error: %s\nin test case: %s\nexpected conflict did not occur:\noriginal:%s\ntwoWay:%s\nmodified:%s\ncurrent:%s\nthreeWay:%s\nresult:%s\n", + getSchemaType(schema), err, c.Description, c.Original, c.TwoWay, c.Modified, c.Current, c.ThreeWay, c.Result) + return + } + + testPatchCreation(t, expected, actual, c.Description) + testPatchApplication(t, current, actual, 
result, c.Description, c.ExpectedError, schema) +} + +func threeWayTestCaseToJSONOrFail(t *testing.T, c StrategicMergePatchTestCase, schema LookupPatchMeta) ([]byte, []byte, []byte, []byte, []byte) { + return sortJsonOrFail(t, testObjectToJSONOrFail(t, c.Original), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, c.Modified), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, c.Current), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, c.ThreeWay), c.Description, schema), + sortJsonOrFail(t, testObjectToJSONOrFail(t, c.Result), c.Description, schema) +} + +func threeWayRawTestCaseToJSONOrFail(t *testing.T, c StrategicMergePatchRawTestCase) ([]byte, []byte, []byte, []byte, []byte) { + return yamlToJSONOrError(t, c.Original), + yamlToJSONOrError(t, c.Modified), + yamlToJSONOrError(t, c.Current), + yamlToJSONOrError(t, c.ThreeWay), + yamlToJSONOrError(t, c.Result) +} + +func testPatchCreation(t *testing.T, expected, actual []byte, description string) { + if !reflect.DeepEqual(actual, expected) { + t.Errorf("error in test case: %s\nexpected patch:\n%s\ngot:\n%s\n", + description, jsonToYAMLOrError(expected), jsonToYAMLOrError(actual)) + return + } +} + +func testPatchApplication(t *testing.T, original, patch, expected []byte, description, expectedError string, schema LookupPatchMeta) { + result, err := StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) + if len(expectedError) != 0 { + if err != nil && strings.Contains(err.Error(), expectedError) { + return + } + t.Errorf("using %s expected error should contain:\n%s\nin test case: %s\nbut got:\n%s\n", getSchemaType(schema), expectedError, description, err) + } + if err != nil { + t.Errorf("using %s error: %s\nin test case: %s\ncannot apply patch:\n%s\nto original:\n%s\n", + getSchemaType(schema), err, description, jsonToYAMLOrError(patch), jsonToYAMLOrError(original)) + return + } + + if !reflect.DeepEqual(result, expected) { + format := "error in test case: %s\npatch application failed:\noriginal:\n%s\npatch:\n%s\nexpected:\n%s\ngot:\n%s\n" + t.Errorf(format, description, + jsonToYAMLOrError(original), jsonToYAMLOrError(patch), + jsonToYAMLOrError(expected), jsonToYAMLOrError(result)) + return + } +} + +func testObjectToJSONOrFail(t *testing.T, o map[string]interface{}) []byte { + if o == nil { + return nil + } + + j, err := toJSON(o) + if err != nil { + t.Error(err) + } + return j +} + +func sortJsonOrFail(t *testing.T, j []byte, description string, schema LookupPatchMeta) []byte { + if j == nil { + return nil + } + r, err := sortMergeListsByName(j, schema) + if err != nil { + t.Errorf("using %s error: %s\n in test case: %s\ncannot sort object:\n%s\n", getSchemaType(schema), err, description, j) + return nil + } + + return r +} + +func getSchemaType(schema LookupPatchMeta) string { + return reflect.TypeOf(schema).String() +} + +func jsonToYAMLOrError(j []byte) string { + y, err := jsonToYAML(j) + if err != nil { + return err.Error() + } + + return string(y) +} + +func toJSON(v interface{}) ([]byte, error) { + j, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("json marshal failed: %v\n%v\n", err, spew.Sdump(v)) + } + + return j, nil +} + +func jsonToYAML(j []byte) ([]byte, error) { + y, err := yaml.JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("json to yaml failed: %v\n%v\n", err, j) + } + + return y, nil +} + +func yamlToJSON(y []byte) ([]byte, error) { + j, err := yaml.YAMLToJSON(y) + if err != nil { + return nil, fmt.Errorf("yaml 
to json failed: %v\n%v\n", err, y) + } + + return j, nil +} + +func yamlToJSONOrError(t *testing.T, y []byte) []byte { + j, err := yamlToJSON(y) + if err != nil { + t.Errorf("%v", err) + } + + return j +} + +type PrecisionItem struct { + Name string `json:"name,omitempty"` + Int32 int32 `json:"int32,omitempty"` + Int64 int64 `json:"int64,omitempty"` + Float32 float32 `json:"float32,omitempty"` + Float64 float64 `json:"float64,omitempty"` +} + +var ( + precisionItem PrecisionItem + precisionItemStructSchema = PatchMetaFromStruct{T: GetTagStructTypeOrDie(precisionItem)} +) + +func TestNumberConversion(t *testing.T) { + testcases := map[string]struct { + Old string + New string + ExpectedPatch string + ExpectedResult string + }{ + "empty": { + Old: `{}`, + New: `{}`, + ExpectedPatch: `{}`, + ExpectedResult: `{}`, + }, + "int32 medium": { + Old: `{"int32":1000000}`, + New: `{"int32":1000000,"name":"newname"}`, + ExpectedPatch: `{"name":"newname"}`, + ExpectedResult: `{"int32":1000000,"name":"newname"}`, + }, + "int32 max": { + Old: `{"int32":2147483647}`, + New: `{"int32":2147483647,"name":"newname"}`, + ExpectedPatch: `{"name":"newname"}`, + ExpectedResult: `{"int32":2147483647,"name":"newname"}`, + }, + "int64 medium": { + Old: `{"int64":1000000}`, + New: `{"int64":1000000,"name":"newname"}`, + ExpectedPatch: `{"name":"newname"}`, + ExpectedResult: `{"int64":1000000,"name":"newname"}`, + }, + "int64 max": { + Old: `{"int64":9223372036854775807}`, + New: `{"int64":9223372036854775807,"name":"newname"}`, + ExpectedPatch: `{"name":"newname"}`, + ExpectedResult: `{"int64":9223372036854775807,"name":"newname"}`, + }, + "float32 max": { + Old: `{"float32":3.4028234663852886e+38}`, + New: `{"float32":3.4028234663852886e+38,"name":"newname"}`, + ExpectedPatch: `{"name":"newname"}`, + ExpectedResult: `{"float32":3.4028234663852886e+38,"name":"newname"}`, + }, + "float64 max": { + Old: `{"float64":1.7976931348623157e+308}`, + New: `{"float64":1.7976931348623157e+308,"name":"newname"}`, + ExpectedPatch: `{"name":"newname"}`, + ExpectedResult: `{"float64":1.7976931348623157e+308,"name":"newname"}`, + }, + } + + precisionItemOpenapiSchema := PatchMetaFromOpenAPI{ + Schema: sptest.GetSchemaOrDie(fakePrecisionItemSchema, "precisionItem"), + } + precisionItemSchemas := []LookupPatchMeta{ + precisionItemStructSchema, + precisionItemOpenapiSchema, + } + + for _, schema := range precisionItemSchemas { + for k, tc := range testcases { + patch, err := CreateTwoWayMergePatchUsingLookupPatchMeta([]byte(tc.Old), []byte(tc.New), schema) + if err != nil { + t.Errorf("using %s in testcase %s: unexpected error %v", getSchemaType(schema), k, err) + continue + } + if tc.ExpectedPatch != string(patch) { + t.Errorf("using %s in testcase %s: expected %s, got %s", getSchemaType(schema), k, tc.ExpectedPatch, string(patch)) + continue + } + + result, err := StrategicMergePatchUsingLookupPatchMeta([]byte(tc.Old), patch, schema) + if err != nil { + t.Errorf("using %s in testcase %s: unexpected error %v", getSchemaType(schema), k, err) + continue + } + if tc.ExpectedResult != string(result) { + t.Errorf("using %s in testcase %s: expected %s, got %s", getSchemaType(schema), k, tc.ExpectedResult, string(result)) + continue + } + } + } +} + +var replaceRawExtensionPatchTestCases = []StrategicMergePatchRawTestCase{ + { + Description: "replace RawExtension field, rest unchanched", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +name: my-object +value: some-value +other: current-other 
+replacingItem: + Some: Generic + Yaml: Inside + The: RawExtension + Field: Period +`), + Current: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 + - name: 3 +replacingItem: + Some: Generic + Yaml: Inside + The: RawExtension + Field: Period +`), + Modified: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 + - name: 3 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + TwoWay: []byte(` +mergingList: + - name: 1 + - name: 2 + - name: 3 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + TwoWayResult: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 + - name: 3 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + ThreeWay: []byte(` +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + Result: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 + - name: 3 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + }, + }, + { + Description: "replace RawExtension field and merge list", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 +replacingItem: + Some: Generic + Yaml: Inside + The: RawExtension + Field: Period +`), + Current: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 3 +replacingItem: + Some: Generic + Yaml: Inside + The: RawExtension + Field: Period +`), + Modified: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + TwoWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 2 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + TwoWayResult: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + ThreeWay: []byte(` +$setElementOrder/mergingList: + - name: 1 + - name: 2 +mergingList: + - name: 2 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + Result: []byte(` +name: my-object +value: some-value +other: current-other +mergingList: + - name: 1 + - name: 2 + - name: 3 +replacingItem: + Newly: Modified + Yaml: Inside + The: RawExtension +`), + }, + }, +} + +func TestReplaceWithRawExtension(t *testing.T) { + mergeItemOpenapiSchema := PatchMetaFromOpenAPI{ + Schema: sptest.GetSchemaOrDie(fakeMergeItemSchema, "mergeItem"), + } + schemas := []LookupPatchMeta{ + mergeItemStructSchema, + mergeItemOpenapiSchema, + } + + for _, schema := range schemas { + for _, c := range replaceRawExtensionPatchTestCases { + testTwoWayPatchForRawTestCase(t, c, schema) + testThreeWayPatchForRawTestCase(t, c, schema) + } + } +} + +func TestUnknownField(t *testing.T) { + testcases := map[string]struct { + Original string + Current string + Modified string + + ExpectedTwoWay string + ExpectedTwoWayErr string + ExpectedTwoWayResult string + ExpectedThreeWay string + ExpectedThreeWayErr string + ExpectedThreeWayResult string + }{ + // cases we can successfully strategically merge + "no diff": { + Original: 
`{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + Current: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + Modified: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + + ExpectedTwoWay: `{}`, + ExpectedTwoWayResult: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + ExpectedThreeWay: `{}`, + ExpectedThreeWayResult: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + }, + "added only": { + Original: `{"name":"foo"}`, + Current: `{"name":"foo"}`, + Modified: `{"name":"foo","scalar":true,"complex":{"nested":true},"array":[1,2,3]}`, + + ExpectedTwoWay: `{"array":[1,2,3],"complex":{"nested":true},"scalar":true}`, + ExpectedTwoWayResult: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + ExpectedThreeWay: `{"array":[1,2,3],"complex":{"nested":true},"scalar":true}`, + ExpectedThreeWayResult: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + }, + "removed only": { + Original: `{"name":"foo","scalar":true,"complex":{"nested":true}}`, + Current: `{"name":"foo","scalar":true,"complex":{"nested":true},"array":[1,2,3]}`, + Modified: `{"name":"foo"}`, + + ExpectedTwoWay: `{"complex":null,"scalar":null}`, + ExpectedTwoWayResult: `{"name":"foo"}`, + ExpectedThreeWay: `{"complex":null,"scalar":null}`, + ExpectedThreeWayResult: `{"array":[1,2,3],"name":"foo"}`, + }, + + // cases we cannot successfully strategically merge (expect errors) + "diff": { + Original: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + Current: `{"array":[1,2,3],"complex":{"nested":true},"name":"foo","scalar":true}`, + Modified: `{"array":[1,2,3],"complex":{"nested":false},"name":"foo","scalar":true}`, + + ExpectedTwoWayErr: `unable to find api field`, + ExpectedThreeWayErr: `unable to find api field`, + }, + } + + mergeItemOpenapiSchema := PatchMetaFromOpenAPI{ + Schema: sptest.GetSchemaOrDie(fakeMergeItemSchema, "mergeItem"), + } + schemas := []LookupPatchMeta{ + mergeItemStructSchema, + mergeItemOpenapiSchema, + } + + for _, k := range sets.StringKeySet(testcases).List() { + tc := testcases[k] + for _, schema := range schemas { + func() { + twoWay, err := CreateTwoWayMergePatchUsingLookupPatchMeta([]byte(tc.Original), []byte(tc.Modified), schema) + if err != nil { + if len(tc.ExpectedTwoWayErr) == 0 { + t.Errorf("using %s in testcase %s: error making two-way patch: %v", getSchemaType(schema), k, err) + } + if !strings.Contains(err.Error(), tc.ExpectedTwoWayErr) { + t.Errorf("using %s in testcase %s: expected error making two-way patch to contain '%s', got %s", getSchemaType(schema), k, tc.ExpectedTwoWayErr, err) + } + return + } + + if string(twoWay) != tc.ExpectedTwoWay { + t.Errorf("using %s in testcase %s: expected two-way patch:\n\t%s\ngot\n\t%s", getSchemaType(schema), k, string(tc.ExpectedTwoWay), string(twoWay)) + return + } + + twoWayResult, err := StrategicMergePatchUsingLookupPatchMeta([]byte(tc.Original), twoWay, schema) + if err != nil { + t.Errorf("using %s in testcase %s: error applying two-way patch: %v", getSchemaType(schema), k, err) + return + } + if string(twoWayResult) != tc.ExpectedTwoWayResult { + t.Errorf("using %s in testcase %s: expected two-way result:\n\t%s\ngot\n\t%s", getSchemaType(schema), k, string(tc.ExpectedTwoWayResult), string(twoWayResult)) + return + } + }() + + func() { + threeWay, err := CreateThreeWayMergePatch([]byte(tc.Original), []byte(tc.Modified), []byte(tc.Current), schema, false) + 
if err != nil { + if len(tc.ExpectedThreeWayErr) == 0 { + t.Errorf("using %s in testcase %s: error making three-way patch: %v", getSchemaType(schema), k, err) + } else if !strings.Contains(err.Error(), tc.ExpectedThreeWayErr) { + t.Errorf("using %s in testcase %s: expected error making three-way patch to contain '%s', got %s", getSchemaType(schema), k, tc.ExpectedThreeWayErr, err) + } + return + } + + if string(threeWay) != tc.ExpectedThreeWay { + t.Errorf("using %s in testcase %s: expected three-way patch:\n\t%s\ngot\n\t%s", getSchemaType(schema), k, string(tc.ExpectedThreeWay), string(threeWay)) + return + } + + threeWayResult, err := StrategicMergePatchUsingLookupPatchMeta([]byte(tc.Current), threeWay, schema) + if err != nil { + t.Errorf("using %s in testcase %s: error applying three-way patch: %v", getSchemaType(schema), k, err) + return + } else if string(threeWayResult) != tc.ExpectedThreeWayResult { + t.Errorf("using %s in testcase %s: expected three-way result:\n\t%s\ngot\n\t%s", getSchemaType(schema), k, string(tc.ExpectedThreeWayResult), string(threeWayResult)) + return + } + }() + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 00000000..f84d65aa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := 
extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD new file mode 100644 index 00000000..db599cbb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importpath = "k8s.io/apimachinery/pkg/util/validation", + deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD new file mode 100644 index 00000000..fc59dd81 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD @@ -0,0 +1,42 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "errors_test.go", + "path_test.go", + ], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "path.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/validation/field", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD new file mode 100644 index 00000000..c062f381 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["wait_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "wait.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/wait", + deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], +) + +filegroup( + name = "package-srcs", + 
srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index a25e9246..0997de80 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -284,32 +284,12 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro // PollUntil tries a condition func until it returns true, an error or stopCh is // closed. // // PollUntil always waits interval before the first run of 'condition'. // 'condition' will always be invoked at least once. func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { return WaitFor(poller(interval, 0), condition, stopCh) } -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil - } - select { - case <-stopCh: - return ErrWaitTimeout - default: - return PollUntil(interval, condition, stopCh) - } -} - // WaitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. type WaitFunc func(done <-chan struct{}) <-chan struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD new file mode 100644 index 00000000..596ea292 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD @@ -0,0 +1,36 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["decoder_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["decoder.go"], + importpath = "k8s.io/apimachinery/pkg/util/yaml", + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/version/BUILD b/vendor/k8s.io/apimachinery/pkg/version/BUILD new file mode 100644 index 00000000..bdccf7b3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "types.go", + ], + importpath = "k8s.io/apimachinery/pkg/version", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go deleted file mode 100644 
index 5e041d6f..00000000 --- a/vendor/k8s.io/apimachinery/pkg/version/helpers.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version - -import ( - "regexp" - "strconv" - "strings" -) - -type versionType int - -const ( - // Bigger the version type number, higher priority it is - versionTypeAlpha versionType = iota - versionTypeBeta - versionTypeGA -) - -var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$") - -func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) { - var err error - submatches := kubeVersionRegex.FindStringSubmatch(v) - if len(submatches) != 4 { - return 0, 0, 0, false - } - switch submatches[2] { - case "alpha": - vType = versionTypeAlpha - case "beta": - vType = versionTypeBeta - case "": - vType = versionTypeGA - default: - return 0, 0, 0, false - } - if majorVersion, err = strconv.Atoi(submatches[1]); err != nil { - return 0, 0, 0, false - } - if vType != versionTypeGA { - if minorVersion, err = strconv.Atoi(submatches[3]); err != nil { - return 0, 0, 0, false - } - } - return majorVersion, vType, minorVersion, true -} - -// CompareKubeAwareVersionStrings compares two kube-like version strings. -// Kube-like version strings are starting with a v, followed by a major version, optional "alpha" or "beta" strings -// followed by a minor version (e.g. v1, v2beta1). Versions will be sorted based on GA/alpha/beta first and then major -// and minor versions. e.g. v2, v1, v1beta2, v1beta1, v1alpha1. -func CompareKubeAwareVersionStrings(v1, v2 string) int { - if v1 == v2 { - return 0 - } - v1major, v1type, v1minor, ok1 := parseKubeVersion(v1) - v2major, v2type, v2minor, ok2 := parseKubeVersion(v2) - switch { - case !ok1 && !ok2: - return strings.Compare(v2, v1) - case !ok1 && ok2: - return -1 - case ok1 && !ok2: - return 1 - } - if v1type != v2type { - return int(v1type) - int(v2type) - } - if v1major != v2major { - return v1major - v2major - } - return v1minor - v2minor -} diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers_test.go b/vendor/k8s.io/apimachinery/pkg/version/helpers_test.go deleted file mode 100644 index 863a5369..00000000 --- a/vendor/k8s.io/apimachinery/pkg/version/helpers_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "testing" -) - -func TestCompareKubeAwareVersionStrings(t *testing.T) { - tests := []*struct { - v1, v2 string - expectedGreater bool - }{ - {"v1", "v2", false}, - {"v2", "v1", true}, - {"v10", "v2", true}, - {"v1", "v2alpha1", true}, - {"v1", "v2beta1", true}, - {"v1alpha2", "v1alpha1", true}, - {"v1beta1", "v2alpha3", true}, - {"v1alpha10", "v1alpha2", true}, - {"v1beta10", "v1beta2", true}, - {"foo", "v1beta2", false}, - {"bar", "foo", true}, - {"version1", "version2", true}, // Non kube-like versions are sorted alphabetically - {"version1", "version10", true}, // Non kube-like versions are sorted alphabetically - } - - for _, tc := range tests { - if e, a := tc.expectedGreater, CompareKubeAwareVersionStrings(tc.v1, tc.v2) > 0; e != a { - if e { - t.Errorf("expected %s to be greater than %s", tc.v1, tc.v2) - } else { - t.Errorf("expected %s to be less than than %s", tc.v1, tc.v2) - } - } - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/BUILD b/vendor/k8s.io/apimachinery/pkg/watch/BUILD new file mode 100644 index 00000000..3106af8f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/BUILD @@ -0,0 +1,69 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "filter.go", + "mux.go", + "streamwatcher.go", + "until.go", + "watch.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apimachinery/pkg/watch", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "filter_test.go", + "mux_test.go", + "streamwatcher_test.go", + "watch_test.go", + ], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["until_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go index 22c9449f..3ca27f22 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/filter.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -62,11 +62,7 @@ func (fw *filteredWatch) Stop() { // loop waits for new values, filters them, and resends them. 
func (fw *filteredWatch) loop() { defer close(fw.result) - for { - event, ok := <-fw.incoming.ResultChan() - if !ok { - break - } + for event := range fw.incoming.ResultChan() { filtered, keep := fw.f(event) if keep { fw.result <- filtered diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index 0ac8dc4e..a65088c1 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -204,11 +204,7 @@ func (m *Broadcaster) Shutdown() { func (m *Broadcaster) loop() { // Deliberately not catching crashes here. Yes, bring down the process if there's a // bug in watch.Broadcaster. - for { - event, ok := <-m.incoming - if !ok { - break - } + for event := range m.incoming { if event.Type == internalRunFunctionMarker { event.Object.(functionFakeRuntimeObject)() continue diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index 0d266ffb..b1b19d11 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD new file mode 100644 index 00000000..7ece664d --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["fields.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/json", +) + +go_test( + name = "go_default_test", + srcs = ["fields_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 00000000..8e8d9fce --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,5 @@ +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go new file mode 100644 index 00000000..8205a4dd --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -0,0 +1,513 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. 
+package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + patchStrategyTagKey = "patchStrategy" + patchMergeKeyTagKey = "patchMergeKey" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// It returns field type, a slice of patch strategies, merge key and error. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( + elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", + t.Kind().String()) + return + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. + tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get(patchStrategyTagKey) + patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) + patchStrategies = strings.Split(patchStrategy, ",") + elemType = tjf.Type + return + } + e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) + return +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'Å¿' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. 
It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields_test.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields_test.go new file mode 100644 index 00000000..33b78bc4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields_test.go @@ -0,0 +1,30 @@ +package json + +import ( + "reflect" + "testing" +) + +func TestLookupPtrToStruct(t *testing.T) { + type Elem struct { + Key string + Value string + } + type Outer struct { + Inner []Elem `json:"inner" patchStrategy:"merge" patchMergeKey:"key"` + } + outer := &Outer{} + elemType, patchStrategies, patchMergeKey, err := LookupPatchMetadataForStruct(reflect.TypeOf(outer), "inner") + if err != nil { + t.Fatal(err) + } + if elemType != reflect.TypeOf([]Elem{}) { + t.Errorf("elemType = %v, want: %v", elemType, reflect.TypeOf([]Elem{})) + } + if !reflect.DeepEqual(patchStrategies, []string{"merge"}) { + t.Errorf("patchStrategies = %v, want: %v", patchStrategies, []string{"merge"}) + } + if patchMergeKey != "key" { + t.Errorf("patchMergeKey = %v, want: %v", patchMergeKey, "key") + } +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD new file mode 100644 index 00000000..22c8ec04 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["deep_equal_test.go"], + embed = [":go_default_library"], +) + +go_library( + name = "go_default_library", + srcs = ["deep_equal.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go index 7ed1d1cf..9e45dbe1 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -44,7 +44,7 @@ func (e Equalities) AddFunc(eqFunc interface{}) error { return fmt.Errorf("expected func, got: %v", ft) } if ft.NumIn() != 2 { return fmt.Errorf("expected two 'in' params, got: %v", ft) } if ft.NumOut() != 1 { return fmt.Errorf("expected one 'out' param, got: %v", ft) } diff --git a/vendor/k8s.io/apiserver/.import-restrictions b/vendor/k8s.io/apiserver/.import-restrictions new file mode 100644 index 00000000..c80e742d --- /dev/null +++ b/vendor/k8s.io/apiserver/.import-restrictions @@ -0,0 +1,10 @@ +{ + "Rules": [ + { + "SelectorRegexp": "k8s[.]io/kubernetes", + "ForbiddenPrefixes": [ + "" + ] + } + ] +} diff --git a/vendor/k8s.io/apiserver/CONTRIBUTING.md b/vendor/k8s.io/apiserver/CONTRIBUTING.md new file mode 100644 index 00000000..94a28899 --- /dev/null +++ 
b/vendor/k8s.io/apiserver/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/apiserver](https://git.k8s.io/kubernetes/staging/src/k8s.io/apiserver) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). + +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiserver/OWNERS b/vendor/k8s.io/apiserver/OWNERS new file mode 100644 index 00000000..a77fd665 --- /dev/null +++ b/vendor/k8s.io/apiserver/OWNERS @@ -0,0 +1,19 @@ +approvers: +- lavalamp +- smarterclayton +- deads2k +- sttts +- liggitt +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- liggitt +- sttts +- ncdc +- tallclair +- enj +- hzxuzhonghu +- CaoShuFeng diff --git a/vendor/k8s.io/apiserver/README.md b/vendor/k8s.io/apiserver/README.md new file mode 100644 index 00000000..130ba87d --- /dev/null +++ b/vendor/k8s.io/apiserver/README.md @@ -0,0 +1,30 @@ +# apiserver + +Generic library for building a Kubernetes aggregated API server. + + +## Purpose + +This library contains code to create a Kubernetes aggregation server, complete with delegated authentication and authorization, +`kubectl`-compatible discovery information, an optional admission chain, and versioned types. Its first consumers are +`k8s.io/kubernetes`, `k8s.io/kube-aggregator`, and `github.com/kubernetes-incubator/service-catalog`. + + +## Compatibility + +There are *NO compatibility guarantees* for this repository, yet. It is in direct support of Kubernetes, so branches +will track Kubernetes and be compatible with that repo. As we more cleanly separate the layers, we will review the +compatibility guarantee. We have a goal to make this easier to use in 2017. + + +## Where does it come from? + +`apiserver` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver. +Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. + + +## Things you should *NOT* do + + 1. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/apiserver`. + 2. Expect compatibility. This repo is changing quickly in direct support of + Kubernetes and the API isn't yet stable enough for API guarantees. diff --git a/vendor/k8s.io/apiserver/SECURITY_CONTACTS b/vendor/k8s.io/apiserver/SECURITY_CONTACTS new file mode 100644 index 00000000..0648a8eb --- /dev/null +++ b/vendor/k8s.io/apiserver/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues.
+# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/vendor/k8s.io/apiserver/code-of-conduct.md b/vendor/k8s.io/apiserver/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/k8s.io/apiserver/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/apiserver/pkg/features/OWNERS b/vendor/k8s.io/apiserver/pkg/features/OWNERS new file mode 100644 index 00000000..fe7b0144 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/features/OWNERS @@ -0,0 +1,2 @@ +approvers: +- feature-approvers diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go new file mode 100644 index 00000000..57bab8b0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + // Every feature gate should add a method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: @tallclair + // alpha: v1.5 + // + // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) + // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). + StreamingProxyRedirects utilfeature.Feature = "StreamingProxyRedirects" + + // owner: @tallclair + // alpha: v1.7 + // beta: v1.8 + // + // AdvancedAuditing enables a much more general API auditing pipeline, which includes support for + // pluggable output backends and an audit policy specifying how different requests should be + // audited. + AdvancedAuditing utilfeature.Feature = "AdvancedAuditing" + + // owner: @ilackams + // alpha: v1.7 + // + // Enables compression of REST responses (GET and LIST only) + APIResponseCompression utilfeature.Feature = "APIResponseCompression" + + // owner: @smarterclayton + // alpha: v1.7 + // + // Allow asynchronous coordination of object creation. + // Auto-enabled by the Initializers admission plugin. + Initializers utilfeature.Feature = "Initializers" + + // owner: @smarterclayton + // alpha: v1.8 + // beta: v1.9 + // + // Allow API clients to retrieve resource lists in chunks rather than + // all at once.
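+	// (Editorial sketch, not part of the upstream file) A caller would
+	// typically guard chunked LIST handling on this gate:
+	//
+	//	if utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) {
+	//		// serve the list one page at a time
+	//	}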
+ APIListChunking utilfeature.Feature = "APIListChunking" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, + AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta}, + APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, + Initializers: {Default: false, PreRelease: utilfeature.Alpha}, + APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, +} diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go new file mode 100644 index 00000000..fe35adc6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -0,0 +1,347 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/golang/glog" + "github.com/spf13/pflag" +) + +type Feature string + +const ( + flagName = "feature-gates" + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. Examples: + // AllAlpha=false,NewFeature=true will result in NewFeature=true + // AllAlpha=true,NewFeature=false will result in NewFeature=false + allAlphaGate Feature = "AllAlpha" +) + +var ( + // The generic features. + defaultFeatures = map[Feature]FeatureSpec{ + allAlphaGate: {Default: false, PreRelease: Alpha}, + } + + // Special handling for a few gates. + specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ + allAlphaGate: setUnsetAlphaGates, + } + + // DefaultFeatureGate is a shared global FeatureGate. + DefaultFeatureGate FeatureGate = NewFeatureGate() +) + +type FeatureSpec struct { + Default bool + PreRelease prerelease +} + +type prerelease string + +const ( + // Values for PreRelease. + Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +// FeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type FeatureGate interface { + // AddFlag adds a flag for setting global feature gates to the specified FlagSet. + AddFlag(fs *pflag.FlagSet) + // Set parses and stores flag gates for known features + // from a string like feature1=true,feature2=false,... + Set(value string) error + // SetFromMap stores flag gates for known features from a map[string]bool or returns an error + SetFromMap(m map[string]bool) error + // Enabled returns true if the key is enabled.
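+	// (Editorial sketch) Components usually consult the shared gate, e.g.
+	// DefaultFeatureGate.Enabled("AllAlpha"); keys that were never set
+	// explicitly fall back to their registered Default.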
+ Enabled(key Feature) bool + // Add adds features to the featureGate. + Add(features map[Feature]FeatureSpec) error + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. + KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() FeatureGate +} + +// featureGate implements FeatureGate as well as pflag.Value for flag parsing. +type featureGate struct { + special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + + // lock guards writes to known, enabled, and reads/writes of closed + lock sync.Mutex + // known holds a map[Feature]FeatureSpec + known *atomic.Value + // enabled holds a map[Feature]bool + enabled *atomic.Value + // closed is set to true when AddFlag is called, and prevents subsequent calls to Add + closed bool +} + +func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Alpha { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + +// Set, String, and Type implement pflag.Value +var _ pflag.Value = &featureGate{} + +func NewFeatureGate() *featureGate { + known := map[Feature]FeatureSpec{} + for k, v := range defaultFeatures { + known[k] = v + } + + knownValue := &atomic.Value{} + knownValue.Store(known) + + enabled := map[Feature]bool{} + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + f := &featureGate{ + known: knownValue, + special: specialFeatures, + enabled: enabledValue, + } + return f +} + +// Set parses a string of the form "key1=value1,key2=value2,..." into a +// map[string]bool of known keys or returns an error. 
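+// For example (editorial sketch; AllAlpha is registered by default):
+//
+//	fg := NewFeatureGate()
+//	_ = fg.Set("AllAlpha=true")      // flips on every alpha gate not set explicitly
+//	err := fg.Set("NoSuchGate=true") // err: "unrecognized key: NoSuchGate"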
+func (f *featureGate) Set(value string) error { + f.lock.Lock() + defer f.lock.Unlock() + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + k := Feature(strings.TrimSpace(arr[0])) + featureSpec, ok := known[k] + if !ok { + return fmt.Errorf("unrecognized key: %s", k) + } + if len(arr) != 2 { + return fmt.Errorf("missing bool value for %s", k) + } + v := strings.TrimSpace(arr[1]) + boolValue, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err) + } + enabled[k] = boolValue + if boolValue && featureSpec.PreRelease == Deprecated { + glog.Warningf("enabling deprecated feature gate %s", k) + } + + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(known, enabled, boolValue) + } + } + + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + + glog.Infof("feature gates: %v", enabled) + return nil +} + +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + for k, v := range m { + k := Feature(k) + _, ok := known[k] + if !ok { + return fmt.Errorf("unrecognized key: %s", k) + } + enabled[k] = v + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(known, enabled, v) + } + } + + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + + glog.Infof("feature gates: %v", enabled) + return nil +} + +// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". +func (f *featureGate) String() string { + pairs := []string{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (f *featureGate) Type() string { + return "mapStringBool" +} + +// Add adds features to the featureGate. +func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot add a feature gate after adding it to the flag set") + } + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + + for name, spec := range features { + if existingSpec, found := known[name]; found { + if existingSpec == spec { + continue + } + return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) + } + + known[name] = spec + } + + // Persist updated state + f.known.Store(known) + + return nil +} + +// Enabled returns true if the key is enabled.
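+// (Editorial note) If the key was never set explicitly, the lookup below
+// falls back to the registered FeatureSpec's Default, so
+// NewFeatureGate().Enabled(allAlphaGate) reports false.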
+func (f *featureGate) Enabled(key Feature) bool { + if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { + return v + } + return f.known.Load().(map[Feature]FeatureSpec)[key].Default +} + +// AddFlag adds a flag for setting global feature gates to the specified FlagSet. +func (f *featureGate) AddFlag(fs *pflag.FlagSet) { + f.lock.Lock() + // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? + // Not all components expose a feature gates flag using this AddFlag method, and + // in the future, all components will completely stop exposing a feature gates flag, + // in favor of componentconfig. + f.closed = true + f.lock.Unlock() + + known := f.KnownFeatures() + fs.Var(f, flagName, ""+ + "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + "Options are:\n"+strings.Join(known, "\n")) +} + +// KnownFeatures returns a slice of strings describing the FeatureGate's known features. +func (f *featureGate) KnownFeatures() []string { + var known []string + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + pre := "" + if v.PreRelease != GA { + pre = fmt.Sprintf("%s - ", v.PreRelease) + } + known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.Default)) + } + sort.Strings(known) + return known +} + +// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be +// set on the copy without mutating the original. This is useful for validating +// config against potential feature gate changes before committing those changes. +func (f *featureGate) DeepCopy() FeatureGate { + // Copy existing state. + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + // Store copied state in new atomics. + knownValue := &atomic.Value{} + knownValue.Store(known) + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + // Construct a new featureGate around the copied state. + // Note that specialFeatures is treated as immutable by convention, + // and we maintain the value of f.closed across the copy. + return &featureGate{ + special: specialFeatures, + known: knownValue, + enabled: enabledValue, + closed: f.closed, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go new file mode 100644 index 00000000..f26f3cb8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go @@ -0,0 +1,320 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package feature + +import ( + "fmt" + "strings" + "testing" + + "github.com/spf13/pflag" +) + +func TestFeatureGateFlag(t *testing.T) { + // gates for testing + const testAlphaGate Feature = "TestAlpha" + const testBetaGate Feature = "TestBeta" + + tests := []struct { + arg string + expect map[Feature]bool + parseError string + }{ + { + arg: "", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: false, + testBetaGate: false, + }, + }, + { + arg: "fooBarBaz=maybeidk", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: false, + testBetaGate: false, + }, + parseError: "unrecognized key: fooBarBaz", + }, + { + arg: "AllAlpha=false", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: false, + testBetaGate: false, + }, + }, + { + arg: "AllAlpha=true", + expect: map[Feature]bool{ + allAlphaGate: true, + testAlphaGate: true, + testBetaGate: false, + }, + }, + { + arg: "AllAlpha=banana", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: false, + testBetaGate: false, + }, + parseError: "invalid value of AllAlpha", + }, + { + arg: "AllAlpha=false,TestAlpha=true", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: true, + testBetaGate: false, + }, + }, + { + arg: "TestAlpha=true,AllAlpha=false", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: true, + testBetaGate: false, + }, + }, + { + arg: "AllAlpha=true,TestAlpha=false", + expect: map[Feature]bool{ + allAlphaGate: true, + testAlphaGate: false, + testBetaGate: false, + }, + }, + { + arg: "TestAlpha=false,AllAlpha=true", + expect: map[Feature]bool{ + allAlphaGate: true, + testAlphaGate: false, + testBetaGate: false, + }, + }, + { + arg: "TestBeta=true,AllAlpha=false", + expect: map[Feature]bool{ + allAlphaGate: false, + testAlphaGate: false, + testBetaGate: true, + }, + }, + } + for i, test := range tests { + fs := pflag.NewFlagSet("testfeaturegateflag", pflag.ContinueOnError) + f := NewFeatureGate() + f.Add(map[Feature]FeatureSpec{ + testAlphaGate: {Default: false, PreRelease: Alpha}, + testBetaGate: {Default: false, PreRelease: Beta}, + }) + f.AddFlag(fs) + + err := fs.Parse([]string{fmt.Sprintf("--%s=%s", flagName, test.arg)}) + if test.parseError != "" { + if !strings.Contains(err.Error(), test.parseError) { + t.Errorf("%d: Parse() Expected %v, Got %v", i, test.parseError, err) + } + } else if err != nil { + t.Errorf("%d: Parse() Expected nil, Got %v", i, err) + } + for k, v := range test.expect { + if actual := f.enabled.Load().(map[Feature]bool)[k]; actual != v { + t.Errorf("%d: expected %s=%v, Got %v", i, k, v, actual) + } + } + } +} + +func TestFeatureGateOverride(t *testing.T) { + const testAlphaGate Feature = "TestAlpha" + const testBetaGate Feature = "TestBeta" + + // Don't parse the flag; set gates directly via Set and assert the overrides take effect.
+ var f FeatureGate = NewFeatureGate() + f.Add(map[Feature]FeatureSpec{ + testAlphaGate: {Default: false, PreRelease: Alpha}, + testBetaGate: {Default: false, PreRelease: Beta}, + }) + + f.Set("TestAlpha=true,TestBeta=true") + if f.Enabled(testAlphaGate) != true { + t.Errorf("Expected true") + } + if f.Enabled(testBetaGate) != true { + t.Errorf("Expected true") + } + + f.Set("TestAlpha=false") + if f.Enabled(testAlphaGate) != false { + t.Errorf("Expected false") + } + if f.Enabled(testBetaGate) != true { + t.Errorf("Expected true") + } +} + +func TestFeatureGateFlagDefaults(t *testing.T) { + // gates for testing + const testAlphaGate Feature = "TestAlpha" + const testBetaGate Feature = "TestBeta" + + // Don't parse the flag, assert defaults are used. + var f FeatureGate = NewFeatureGate() + f.Add(map[Feature]FeatureSpec{ + testAlphaGate: {Default: false, PreRelease: Alpha}, + testBetaGate: {Default: true, PreRelease: Beta}, + }) + + if f.Enabled(testAlphaGate) != false { + t.Errorf("Expected false") + } + if f.Enabled(testBetaGate) != true { + t.Errorf("Expected true") + } +} + +func TestFeatureGateSetFromMap(t *testing.T) { + // gates for testing + const testAlphaGate Feature = "TestAlpha" + const testBetaGate Feature = "TestBeta" + + tests := []struct { + name string + setmap map[string]bool + expect map[Feature]bool + setmapError string + }{ + { + name: "set TestAlpha and TestBeta true", + setmap: map[string]bool{ + "TestAlpha": true, + "TestBeta": true, + }, + expect: map[Feature]bool{ + testAlphaGate: true, + testBetaGate: true, + }, + }, + { + name: "set TestBeta true", + setmap: map[string]bool{ + "TestBeta": true, + }, + expect: map[Feature]bool{ + testAlphaGate: false, + testBetaGate: true, + }, + }, + { + name: "set TestAlpha false", + setmap: map[string]bool{ + "TestAlpha": false, + }, + expect: map[Feature]bool{ + testAlphaGate: false, + testBetaGate: false, + }, + }, + { + name: "set TestInvalid true", + setmap: map[string]bool{ + "TestInvalid": true, + }, + expect: map[Feature]bool{ + testAlphaGate: false, + testBetaGate: false, + }, + setmapError: "unrecognized key:", + }, + } + for i, test := range tests { + t.Run(fmt.Sprintf("SetFromMap %s", test.name), func(t *testing.T) { + f := NewFeatureGate() + f.Add(map[Feature]FeatureSpec{ + testAlphaGate: {Default: false, PreRelease: Alpha}, + testBetaGate: {Default: false, PreRelease: Beta}, + }) + err := f.SetFromMap(test.setmap) + if test.setmapError != "" { + if !strings.Contains(err.Error(), test.setmapError) { + t.Errorf("%d: SetFromMap(%#v) Expected err:%v, Got err:%v", i, test.setmap, test.setmapError, err) + } + } else if err != nil { + t.Errorf("%d: SetFromMap(%#v) Expected success, Got err:%v", i, test.setmap, err) + } + for k, v := range test.expect { + if actual := f.Enabled(k); actual != v { + t.Errorf("%d: SetFromMap(%#v) Expected %s=%v, Got %s=%v", i, test.setmap, k, v, k, actual) + } + } + }) + } +} + +func TestFeatureGateString(t *testing.T) { + // gates for testing + const testAlphaGate Feature = "TestAlpha" + const testBetaGate Feature = "TestBeta" + const testGAGate Feature = "TestGA" + + featuremap := map[Feature]FeatureSpec{ + testGAGate: {Default: true, PreRelease: GA}, + testAlphaGate: {Default: false, PreRelease: Alpha}, + testBetaGate: {Default: true, PreRelease: Beta}, + } + + tests := []struct { + setmap map[string]bool + expect string + }{ + { + setmap: map[string]bool{ + "TestAlpha": false, + }, + expect: "TestAlpha=false", + }, + { + setmap: map[string]bool{ + "TestAlpha": false, + "TestBeta": true,
+ }, + expect: "TestAlpha=false,TestBeta=true", + }, + { + setmap: map[string]bool{ + "TestGA": true, + "TestAlpha": false, + "TestBeta": true, + }, + expect: "TestAlpha=false,TestBeta=true,TestGA=true", + }, + } + for i, test := range tests { + t.Run(fmt.Sprintf("SetFromMap %s", test.expect), func(t *testing.T) { + f := NewFeatureGate() + f.Add(featuremap) + f.SetFromMap(test.setmap) + result := f.String() + if result != test.expect { + t.Errorf("%d: SetFromMap(%#v) Expected %s, Got %s", i, test.setmap, test.expect, result) + } + }) + } +} diff --git a/vendor/k8s.io/kube-openapi/.gitignore b/vendor/k8s.io/kube-openapi/.gitignore new file mode 100644 index 00000000..e3604777 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/.gitignore @@ -0,0 +1,20 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Intellij IDEA files +.idea/ +*.iml +.vscode + diff --git a/vendor/k8s.io/kube-openapi/.travis.yml b/vendor/k8s.io/kube-openapi/.travis.yml new file mode 100644 index 00000000..996a4df0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/.travis.yml @@ -0,0 +1,4 @@ +language: go +go_import_path: k8s.io/kube-openapi +script: go test ./pkg/... + diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kube-openapi/OWNERS b/vendor/k8s.io/kube-openapi/OWNERS new file mode 100755 index 00000000..e8c62bfa --- /dev/null +++ b/vendor/k8s.io/kube-openapi/OWNERS @@ -0,0 +1,11 @@ +reviewers: +- yujuhong +- gmarek +- mbohlool +- philips +- seans3 +- apelisse +approvers: +- mbohlool +- lavalamp +- seans3 diff --git a/vendor/k8s.io/kube-openapi/README.md b/vendor/k8s.io/kube-openapi/README.md new file mode 100644 index 00000000..babadde1 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/README.md @@ -0,0 +1,14 @@ +# Kube OpenAPI + +This repo is the home for Kubernetes OpenAPI discovery spec generation. The goal +is to support a subset of OpenAPI features to satisfy Kubernetes use-cases but +implement that subset with little to no assumption about the structure of the +code or routes. Thus, there should be no Kubernetes-specific code in this repo. + + +There are two main parts: + - A model generator that goes through .go files, finds and generates model +definitions.
+ - The spec generator that is responsible for dynamically generating +the final OpenAPI spec using web service routes or combining other +OpenAPI/Json specs. diff --git a/vendor/k8s.io/kube-openapi/code-of-conduct.md b/vendor/k8s.io/kube-openapi/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 00000000..11ed8a6b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 00000000..a57dcd36 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,299 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + "github.com/googleapis/gnostic/OpenAPIv2" + "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. +func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document.
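+// Usage sketch (editorial; assumes doc came from the gnostic OpenAPI v2
+// parser or a served /swagger.json):
+//
+//	models, err := NewOpenAPIData(doc)
+//	if err != nil {
+//		return nil, err
+//	}
+//	schema := models.LookupModel("io.k8s.api.apps.v1beta1.Deployment")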
+func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exist. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and return a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + var sub Schema + if s.GetAdditionalProperties().GetSchema() == nil { + sub = &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + } + } else { + var err error + sub, err = d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + switch t { + case String: // do nothing + case Number: // do nothing + case Integer: // do nothing + case Boolean: // do nothing + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + return nil,
newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + }, nil +} + +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if s.GetXRef() != "" { + return d.parseReference(s, path) + } + objectTypes := s.GetType().GetValue() + switch len(objectTypes) { + case 0: + // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include + // the type:object property (they only included the "properties" property), so we need to handle this case + if s.GetProperties() != nil { + return d.parseKind(s, path) + } else { + // Definition has no type and no properties. Treat it as an arbitrary value + // TODO: what if it has additionalProperties or patternProperties? + return d.parseArbitrary(s, path) + } + case 1: + t := objectTypes[0] + switch t { + case object: + if s.GetProperties() != nil { + return d.parseKind(s, path) + } else { + return d.parseMap(s, path) + } + case array: + return d.parseArray(s, path) + } + return d.parsePrimitive(s, path) + default: + // the OpenAPI generator never generates (nor did it ever in the past) OpenAPI type definitions with multiple types + return nil, newSchemaError(path, "definitions with multiple types aren't supported") + } +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name.
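+// For example (editorial sketch; v is any SchemaVisitor implementation):
+//
+//	if s := d.LookupModel("io.k8s.api.apps.v1beta1.Deployment"); s != nil {
+//		s.Accept(v)
+//	}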
+func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 00000000..f26b5ef8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,276 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// The Models interface describes a model provider. It can give you the +// schema for a specific model. +type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatibility, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions.
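+	// (Editorial example) Kubernetes publishes patch metadata through these
+	// extensions, e.g. s.GetExtensions()["x-kubernetes-patch-strategy"].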
+ GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each type of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its elements of the same `SubType`. +type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. + Fields map[string]Schema +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns an alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object whose values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and the concrete type can be read from the `Type` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean.
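+	// (Editorial note) Format, when non-empty, narrows Type further, e.g. an
+	// integer with Format "int64"; GetName then renders "integer (int64)".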
+ Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_suite_test.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_suite_test.go new file mode 100644 index 00000000..24b5168e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_suite_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/config" + . "github.com/onsi/ginkgo/types" + . "github.com/onsi/gomega" + + "fmt" + "testing" +) + +func TestOpenapi(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecsWithDefaultAndCustomReporters(t, "Openapi Suite", []Reporter{newlineReporter{}}) +} + +// Print a newline after the default newlineReporter due to issue +// https://github.com/jstemmer/go-junit-report/issues/31 +type newlineReporter struct{} + +func (newlineReporter) SpecSuiteWillBegin(config GinkgoConfigType, summary *SuiteSummary) {} + +func (newlineReporter) BeforeSuiteDidRun(setupSummary *SetupSummary) {} + +func (newlineReporter) AfterSuiteDidRun(setupSummary *SetupSummary) {} + +func (newlineReporter) SpecWillRun(specSummary *SpecSummary) {} + +func (newlineReporter) SpecDidComplete(specSummary *SpecSummary) {} + +// SpecSuiteDidEnd Prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:" +func (newlineReporter) SpecSuiteDidEnd(summary *SuiteSummary) { fmt.Printf("\n") } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_test.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_test.go new file mode 100644 index 00000000..b6e23725 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi_test.go @@ -0,0 +1,265 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto_test + +import ( + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/util/proto/testing" +) + +var fakeSchema = testing.Fake{Path: filepath.Join("testdata", "swagger.json")} +var fakeSchemaNext = testing.Fake{Path: filepath.Join("testdata", "swagger_next.json")} + +var _ = Describe("Reading apps/v1beta1/Deployment from v1.8 openAPIData", func() { + var models proto.Models + BeforeEach(func() { + s, err := fakeSchema.OpenAPISchema() + Expect(err).To(BeNil()) + models, err = proto.NewOpenAPIData(s) + Expect(err).To(BeNil()) + }) + + model := "io.k8s.api.apps.v1beta1.Deployment" + var schema proto.Schema + It("should lookup the Schema by its model name", func() { + schema = models.LookupModel(model) + Expect(schema).ToNot(BeNil()) + }) + + var deployment *proto.Kind + It("should be a Kind", func() { + deployment = schema.(*proto.Kind) + Expect(deployment).ToNot(BeNil()) + }) + + It("should have a path", func() { + Expect(deployment.GetPath().Get()).To(Equal([]string{"io.k8s.api.apps.v1beta1.Deployment"})) + }) + + It("should have a kind key of type string", func() { + Expect(deployment.Fields).To(HaveKey("kind")) + key := deployment.Fields["kind"].(*proto.Primitive) + Expect(key).ToNot(BeNil()) + Expect(key.Type).To(Equal("string")) + Expect(key.GetPath().Get()).To(Equal([]string{"io.k8s.api.apps.v1beta1.Deployment", ".kind"})) + }) + + It("should have a apiVersion key of type string", func() { + Expect(deployment.Fields).To(HaveKey("apiVersion")) + key := deployment.Fields["apiVersion"].(*proto.Primitive) + Expect(key).ToNot(BeNil()) + Expect(key.Type).To(Equal("string")) + Expect(key.GetPath().Get()).To(Equal([]string{"io.k8s.api.apps.v1beta1.Deployment", ".apiVersion"})) + }) + + It("should have a metadata key of type Reference", func() { + Expect(deployment.Fields).To(HaveKey("metadata")) + key := deployment.Fields["metadata"].(proto.Reference) + Expect(key).ToNot(BeNil()) + Expect(key.Reference()).To(Equal("io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta")) + subSchema := key.SubSchema().(*proto.Kind) + Expect(subSchema).ToNot(BeNil()) + }) + + var status *proto.Kind + It("should have a status key of type Reference", func() { + Expect(deployment.Fields).To(HaveKey("status")) + key := deployment.Fields["status"].(proto.Reference) + Expect(key).ToNot(BeNil()) + Expect(key.Reference()).To(Equal("io.k8s.api.apps.v1beta1.DeploymentStatus")) + status = key.SubSchema().(*proto.Kind) + Expect(status).ToNot(BeNil()) + }) + + It("should have a valid DeploymentStatus", func() { + By("having availableReplicas key") + Expect(status.Fields).To(HaveKey("availableReplicas")) + replicas := status.Fields["availableReplicas"].(*proto.Primitive) + Expect(replicas).ToNot(BeNil()) + Expect(replicas.Type).To(Equal("integer")) + + By("having conditions key") + Expect(status.Fields).To(HaveKey("conditions")) + conditions := status.Fields["conditions"].(*proto.Array) + Expect(conditions).ToNot(BeNil()) + Expect(conditions.GetName()).To(Equal(`Array of Reference to "io.k8s.api.apps.v1beta1.DeploymentCondition"`)) + Expect(conditions.GetExtensions()).To(Equal(map[string]interface{}{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + })) + condition := conditions.SubType.(proto.Reference) + 
Expect(condition.Reference()).To(Equal("io.k8s.api.apps.v1beta1.DeploymentCondition")) + }) + + var spec *proto.Kind + It("should have a spec key of type Reference", func() { + Expect(deployment.Fields).To(HaveKey("spec")) + key := deployment.Fields["spec"].(proto.Reference) + Expect(key).ToNot(BeNil()) + Expect(key.Reference()).To(Equal("io.k8s.api.apps.v1beta1.DeploymentSpec")) + spec = key.SubSchema().(*proto.Kind) + Expect(spec).ToNot(BeNil()) + }) + + It("should have a spec with no gvk", func() { + _, found := spec.GetExtensions()["x-kubernetes-group-version-kind"] + Expect(found).To(BeFalse()) + }) + + It("should have a spec with a PodTemplateSpec sub-field", func() { + Expect(spec.Fields).To(HaveKey("template")) + key := spec.Fields["template"].(proto.Reference) + Expect(key).ToNot(BeNil()) + Expect(key.Reference()).To(Equal("io.k8s.api.core.v1.PodTemplateSpec")) + }) +}) + +var _ = Describe("Reading apps/v1beta1/Deployment from v1.11 openAPIData", func() { + var models proto.Models + BeforeEach(func() { + s, err := fakeSchemaNext.OpenAPISchema() + Expect(err).To(BeNil()) + models, err = proto.NewOpenAPIData(s) + Expect(err).To(BeNil()) + }) + + model := "io.k8s.api.apps.v1beta1.Deployment" + var schema proto.Schema + It("should lookup the Schema by its model name", func() { + schema = models.LookupModel(model) + Expect(schema).ToNot(BeNil()) + }) + + var deployment *proto.Kind + It("should be a Kind", func() { + deployment = schema.(*proto.Kind) + Expect(deployment).ToNot(BeNil()) + }) +}) + +var _ = Describe("Reading apps/v1beta1/ControllerRevision from v1.11 openAPIData", func() { + var models proto.Models + BeforeEach(func() { + s, err := fakeSchemaNext.OpenAPISchema() + Expect(err).To(BeNil()) + models, err = proto.NewOpenAPIData(s) + Expect(err).To(BeNil()) + }) + + model := "io.k8s.api.apps.v1beta1.ControllerRevision" + var schema proto.Schema + It("should lookup the Schema by its model name", func() { + schema = models.LookupModel(model) + Expect(schema).ToNot(BeNil()) + }) + + var cr *proto.Kind + It("data property should be map[string]Arbitrary", func() { + cr = schema.(*proto.Kind) + Expect(cr).ToNot(BeNil()) + Expect(cr.Fields).To(HaveKey("data")) + + data := cr.Fields["data"].(*proto.Map) + Expect(data).ToNot(BeNil()) + Expect(data.GetName()).To(Equal("Map of Arbitrary value (primitive, object or array)")) + Expect(data.GetPath().Get()).To(Equal([]string{"io.k8s.api.apps.v1beta1.ControllerRevision", ".data"})) + + arbitrary := data.SubType.(*proto.Arbitrary) + Expect(arbitrary).ToNot(BeNil()) + Expect(arbitrary.GetName()).To(Equal("Arbitrary value (primitive, object or array)")) + Expect(arbitrary.GetPath().Get()).To(Equal([]string{"io.k8s.api.apps.v1beta1.ControllerRevision", ".data"})) + }) +}) + +var _ = Describe("Reading authorization.k8s.io/v1/SubjectAccessReview from openAPIData", func() { + var models proto.Models + BeforeEach(func() { + s, err := fakeSchema.OpenAPISchema() + Expect(err).To(BeNil()) + models, err = proto.NewOpenAPIData(s) + Expect(err).To(BeNil()) + }) + + model := "io.k8s.api.authorization.v1.LocalSubjectAccessReview" + var schema proto.Schema + It("should lookup the Schema by its model", func() { + schema = models.LookupModel(model) + Expect(schema).ToNot(BeNil()) + }) + + var sarspec *proto.Kind + It("should be a Kind and have a spec", func() { + sar := schema.(*proto.Kind) + Expect(sar).ToNot(BeNil()) + Expect(sar.Fields).To(HaveKey("spec")) + specRef := sar.Fields["spec"].(proto.Reference) + Expect(specRef).ToNot(BeNil()) + 
	Expect(specRef.Reference()).To(Equal("io.k8s.api.authorization.v1.SubjectAccessReviewSpec"))
+		sarspec = specRef.SubSchema().(*proto.Kind)
+		Expect(sarspec).ToNot(BeNil())
+	})
+
+	It("should have a valid SubjectAccessReviewSpec", func() {
+		Expect(sarspec.Fields).To(HaveKey("extra"))
+		extra := sarspec.Fields["extra"].(*proto.Map)
+		Expect(extra).ToNot(BeNil())
+		Expect(extra.GetName()).To(Equal("Map of Array of string"))
+		Expect(extra.GetPath().Get()).To(Equal([]string{"io.k8s.api.authorization.v1.SubjectAccessReviewSpec", ".extra"}))
+		array := extra.SubType.(*proto.Array)
+		Expect(array).ToNot(BeNil())
+		Expect(array.GetName()).To(Equal("Array of string"))
+		Expect(array.GetPath().Get()).To(Equal([]string{"io.k8s.api.authorization.v1.SubjectAccessReviewSpec", ".extra"}))
+		str := array.SubType.(*proto.Primitive)
+		Expect(str).ToNot(BeNil())
+		Expect(str.Type).To(Equal("string"))
+		Expect(str.GetName()).To(Equal("string"))
+		Expect(str.GetPath().Get()).To(Equal([]string{"io.k8s.api.authorization.v1.SubjectAccessReviewSpec", ".extra"}))
+	})
+})
+
+var _ = Describe("Path", func() {
+	It("can be created by NewPath", func() {
+		path := proto.NewPath("key")
+		Expect(path.String()).To(Equal("key"))
+	})
+	It("can create and print complex paths", func() {
+		key := proto.NewPath("key")
+		array := key.ArrayPath(12)
+		field := array.FieldPath("subKey")
+
+		Expect(field.String()).To(Equal("key[12].subKey"))
+	})
+	It("has a length", func() {
+		key := proto.NewPath("key")
+		array := key.ArrayPath(12)
+		field := array.FieldPath("subKey")
+
+		Expect(field.Len()).To(Equal(3))
+	})
+	It("can look like an array", func() {
+		key := proto.NewPath("key")
+		array := key.ArrayPath(12)
+		field := array.FieldPath("subKey")
+
+		Expect(field.Get()).To(Equal([]string{"key", "[12]", ".subKey"}))
+	})
+})
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/trie.go b/vendor/k8s.io/kube-openapi/pkg/util/trie.go
new file mode 100644
index 00000000..a9a76c17
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/trie.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+// Trie is a simple trie implementation with Add, HasPrefix and GetPrefix methods.
+type Trie struct {
+	children map[byte]*Trie
+	wordTail bool
+	word     string
+}
+
+// NewTrie creates a Trie and adds all strings in the provided list to it.
+func NewTrie(list []string) Trie {
+	ret := Trie{
+		children: make(map[byte]*Trie),
+		wordTail: false,
+	}
+	for _, v := range list {
+		ret.Add(v)
+	}
+	return ret
+}
+
+// Add adds a word to this trie
+func (t *Trie) Add(v string) {
+	root := t
+	for _, b := range []byte(v) {
+		child, exists := root.children[b]
+		if !exists {
+			child = &Trie{
+				children: make(map[byte]*Trie),
+				wordTail: false,
+			}
+			root.children[b] = child
+		}
+		root = child
+	}
+	root.wordTail = true
+	root.word = v
+}
+
+// HasPrefix returns true if v has any of the prefixes stored in this trie.
+func (t *Trie) HasPrefix(v string) bool {
+	_, has := t.GetPrefix(v)
+	return has
+}
+
+// GetPrefix is like HasPrefix but returns the matched prefix, or the empty string when there is no match.
+func (t *Trie) GetPrefix(v string) (string, bool) {
+	root := t
+	if root.wordTail {
+		return root.word, true
+	}
+	for _, b := range []byte(v) {
+		child, exists := root.children[b]
+		if !exists {
+			return "", false
+		}
+		if child.wordTail {
+			return child.word, true
+		}
+		root = child
+	}
+	return "", false
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go
new file mode 100644
index 00000000..bcc0c4d4
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import "strings"
+
+// ToCanonicalName converts Golang package/type name into canonical OpenAPI name.
+// Examples:
+// Input:  k8s.io/api/core/v1.Pod
+// Output: io.k8s.api.core.v1.Pod
+//
+// Input:  k8s.io/api/core/v1
+// Output: io.k8s.api.core.v1
+func ToCanonicalName(name string) string {
+	nameParts := strings.Split(name, "/")
+	// Reverse first part. e.g., io.k8s... instead of k8s.io...
+	if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
+		parts := strings.Split(nameParts[0], ".")
+		for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
+			parts[i], parts[j] = parts[j], parts[i]
+		}
+		nameParts[0] = strings.Join(parts, ".")
+	}
+	return strings.Join(nameParts, ".")
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util_test.go b/vendor/k8s.io/kube-openapi/pkg/util/util_test.go
new file mode 100644
index 00000000..f3a1b91a
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/util_test.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
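Editorial note: a small usage sketch of the Trie and ToCanonicalName helpers introduced above; the outputs follow directly from the implementations as written, and the import path is the package added by this patch.

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/util"
)

func main() {
	// Build a trie of known prefixes, then test candidate strings against it.
	t := util.NewTrie([]string{"k8s.io/", "github.com/"})
	fmt.Println(t.HasPrefix("k8s.io/apimachinery")) // true

	prefix, ok := t.GetPrefix("k8s.io/api")
	fmt.Println(prefix, ok) // k8s.io/ true

	// Reverse the dotted host part, then join the remaining path with dots.
	fmt.Println(util.ToCanonicalName("k8s.io/api/core/v1.Pod")) // io.k8s.api.core.v1.Pod
}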
+*/ + +package util + +import "testing" + +func TestCanonicalName(t *testing.T) { + + var tests = []struct { + input string + expected string + }{ + {"k8s.io/api/core/v1.Pod", "io.k8s.api.core.v1.Pod"}, + {"k8s.io/api/networking/v1/NetworkPolicy", "io.k8s.api.networking.v1.NetworkPolicy"}, + {"k8s.io/api/apps/v1beta2.Scale", "io.k8s.api.apps.v1beta2.Scale"}, + {"servicecatalog.k8s.io/foo/bar/v1alpha1.Baz", "io.k8s.servicecatalog.foo.bar.v1alpha1.Baz"}, + } + for _, test := range tests { + if got := ToCanonicalName(test.input); got != test.expected { + t.Errorf("ToCanonicalName(%q) = %v", test.input, got) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/OWNERS new file mode 100644 index 00000000..8f7783f9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/OWNERS @@ -0,0 +1,4 @@ +approvers: +- api-approvers +reviewers: +- api-reviewers diff --git a/vendor/k8s.io/kubernetes/pkg/apis/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/OWNERS new file mode 100644 index 00000000..180d60f9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/OWNERS @@ -0,0 +1,44 @@ +approvers: +- erictune +- lavalamp +- smarterclayton +- thockin +- liggitt +# - bgrant0607 # manual escalations only +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- pmorie +- sttts +- dchen1107 +- saad-ali +- luxas +- janetkuo +- justinsb +- pwittrock +- ncdc +- tallclair +- yifan-gu +- eparis +- mwielgus +- feiskyer +- soltysh +- piosz +- dims +- errordeveloper +- madhusudancs +- krousey +- rootfs +- jszczepkowski diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD new file mode 100644 index 00000000..8d54d672 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD @@ -0,0 +1,62 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "annotation_key_constants.go", + "doc.go", + "field_constants.go", + "json.go", + "objectreference.go", + "register.go", + "resource.go", + "taint.go", + "toleration.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "taint_test.go", + "toleration_test.go", + ], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/fuzzer:all-srcs", + "//pkg/apis/core/helper:all-srcs", + "//pkg/apis/core/install:all-srcs", + "//pkg/apis/core/pods:all-srcs", + "//pkg/apis/core/v1:all-srcs", + "//pkg/apis/core/validation:all-srcs", 
+	],
+	tags = ["automanaged"],
+	visibility = ["//visibility:public"],
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS
new file mode 100644
index 00000000..17bebcb3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS
@@ -0,0 +1,43 @@
+approvers:
+- erictune
+- lavalamp
+- smarterclayton
+- thockin
+- liggitt
+# - bgrant0607 # manual escalations only
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- yujuhong
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- vishh
+- mikedanese
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- davidopp
+- pmorie
+- sttts
+- dchen1107
+- saad-ali
+- zmerlynn
+- luxas
+- janetkuo
+- justinsb
+- pwittrock
+- roberthbailey
+- ncdc
+- tallclair
+- yifan-gu
+- eparis
+- mwielgus
+- soltysh
+- piosz
+- jsafrane
+- jbeda
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
new file mode 100644
index 00000000..131fdd99
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file should be consistent with pkg/api/v1/annotation_key_constants.go.
+
+package core
+
+const (
+	// ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy
+	// webhook backend fails.
+	ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
+
+	// PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation
+	PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude"
+
+	// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
+	MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
+
+	// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
+	// in the Annotations of a Pod.
+	TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
+
+	// TaintsAnnotationKey represents the key of taints data (json serialized)
+	// in the Annotations of a Node.
+	TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
+
+	// SeccompPodAnnotationKey represents the key of a seccomp profile applied
+	// to all containers of a pod.
+	SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
+
+	// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
+	// to one container of a pod.
+	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+
+	// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
+	// in the Annotations of a Node.
+	PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
+
+	// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
+	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
+	// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
+	// the kubelet. Pods with other sysctls will fail to launch.
+	SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
+
+	// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
+	// container of a pod. The annotation value is a comma separated list of sysctl_name=value
+	// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
+	// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
+	// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
+	// will fail to launch.
+	UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
+
+	// ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
+	// an object (e.g. secret, config map) before fetching it again from apiserver.
+	// This annotation can be attached to node.
+	ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
+
+	// BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by
+	// the kubelet prior to running
+	BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint"
+
+	// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths.
+	NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
+
+	kubectlPrefix = "kubectl.kubernetes.io/"
+
+	// LastAppliedConfigAnnotation is the annotation used to store the previous
+	// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
+	LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
+
+	// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
+	//
+	// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
+	// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
+	// access only from the CIDRs currently allocated to MIT & the USPS.
+	//
+	// Not all cloud providers support this annotation, though AWS & GCE do.
+	AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
new file mode 100644
index 00000000..6017bfda
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package core contains the latest (or "internal") version of the
+// Kubernetes API objects.
+// These are the API objects as represented in memory.
+// The contract presented to clients is located in the versioned packages,
+// which are sub-directories. The first one is "v1". Those packages
+// describe how a particular version is serialized to storage/network.
+package core // import "k8s.io/kubernetes/pkg/apis/core"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
new file mode 100644
index 00000000..a26f8056
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+// Field path constants that are specific to the internal API
+// representation.
+const (
+	NodeUnschedulableField = "spec.unschedulable"
+	ObjectNameField        = "metadata.name"
+	PodHostField           = "spec.nodeName"
+	PodStatusField         = "status.phase"
+	SecretTypeField        = "type"
+
+	EventReasonField                  = "action"
+	EventSourceField                  = "reportingComponent"
+	EventTypeField                    = "type"
+	EventInvolvedKindField            = "involvedObject.kind"
+	EventInvolvedNamespaceField       = "involvedObject.namespace"
+	EventInvolvedNameField            = "involvedObject.name"
+	EventInvolvedUIDField             = "involvedObject.uid"
+	EventInvolvedAPIVersionField      = "involvedObject.apiVersion"
+	EventInvolvedResourceVersionField = "involvedObject.resourceVersion"
+	EventInvolvedFieldPathField       = "involvedObject.fieldPath"
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go
new file mode 100644
index 00000000..937cd056
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import "encoding/json"
+
+// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations
+// to prevent anyone from marshaling these internal structs.
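Editorial note: the guard below pairs compile-time interface assertions with panicking implementations, so any accidental serialization of an internal type fails loudly instead of silently leaking internal fields. A sketch of the same pattern applied to a hypothetical internal type (the name InternalOnly is invented for illustration):

package core

import "encoding/json"

// InternalOnly is a hypothetical internal-only type used to illustrate the guard.
type InternalOnly struct{}

// Compile-time checks that the panicking implementations satisfy the interfaces.
var _ json.Marshaler = InternalOnly{}
var _ json.Unmarshaler = &InternalOnly{}

// Any attempt to serialize or deserialize the internal type panics immediately.
func (InternalOnly) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") }
func (*InternalOnly) UnmarshalJSON([]byte) error  { panic("do not unmarshal to internal struct") }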
+ +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go new file mode 100644 index 00000000..55b27f30 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package core + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go new file mode 100644 index 00000000..2784cbe1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
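Editorial note: the ObjectReference methods just above make the type act as its own schema.ObjectKind, allowing GroupVersionKind round-trips. A hedged usage sketch (values chosen for illustration):

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	ref := &core.ObjectReference{APIVersion: "apps/v1", Kind: "Deployment"}
	gvk := ref.GroupVersionKind()
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // apps v1 Deployment

	// Round-trip back through SetGroupVersionKind.
	ref.SetGroupVersionKind(gvk.GroupVersion().WithKind("StatefulSet"))
	fmt.Println(ref.APIVersion, ref.Kind) // apps/v1 StatefulSet
}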
+*/ + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeConfigSource{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go new file mode 100644 index 00000000..1910cd92 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. 
+func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageEphemeral() *resource.Quantity { + if val, ok := (*self)[ResourceEphemeralStorage]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go new file mode 100644 index 00000000..ae1feb74 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package core + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint_test.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint_test.go new file mode 100644 index 00000000..baa9404e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
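Editorial note: the ResourceList accessors above return a typed zero Quantity when a key is absent rather than nil, so callers never have to nil-check. A usage sketch, assuming the usual ResourceCPU and TaintEffectNoSchedule constants defined elsewhere in this package's types.go:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	rl := core.ResourceList{core.ResourceCPU: resource.MustParse("500m")}
	fmt.Println(rl.Cpu().String())    // 500m
	fmt.Println(rl.Memory().String()) // 0 (typed zero value when the key is absent)

	taint := core.Taint{Key: "dedicated", Value: "gpu", Effect: core.TaintEffectNoSchedule}
	fmt.Println(taint.ToString()) // dedicated=gpu:NoSchedule
}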
+*/
+
+package core
+
+import "testing"
+
+func TestTaintToString(t *testing.T) {
+	testCases := []struct {
+		taint          *Taint
+		expectedString string
+	}{
+		{
+			taint: &Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			expectedString: "foo=bar:NoSchedule",
+		},
+		{
+			taint: &Taint{
+				Key:    "foo",
+				Effect: TaintEffectNoSchedule,
+			},
+			expectedString: "foo:NoSchedule",
+		},
+	}
+
+	for i, tc := range testCases {
+		if tc.expectedString != tc.taint.ToString() {
+			t.Errorf("[%v] expected taint %v converted to %s, got %s", i, tc.taint, tc.expectedString, tc.taint.ToString())
+		}
+	}
+}
+
+func TestMatchTaint(t *testing.T) {
+	testCases := []struct {
+		description  string
+		taint        *Taint
+		taintToMatch Taint
+		expectMatch  bool
+	}{
+		{
+			description: "two taints with the same key,value,effect should match",
+			taint: &Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			taintToMatch: Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			expectMatch: true,
+		},
+		{
+			description: "two taints with the same key,effect but different value should match",
+			taint: &Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			taintToMatch: Taint{
+				Key:    "foo",
+				Value:  "different-value",
+				Effect: TaintEffectNoSchedule,
+			},
+			expectMatch: true,
+		},
+		{
+			description: "two taints with different keys cannot match",
+			taint: &Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			taintToMatch: Taint{
+				Key:    "different-key",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			expectMatch: false,
+		},
+		{
+			description: "two taints with different effects cannot match",
+			taint: &Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectNoSchedule,
+			},
+			taintToMatch: Taint{
+				Key:    "foo",
+				Value:  "bar",
+				Effect: TaintEffectPreferNoSchedule,
+			},
+			expectMatch: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		if tc.expectMatch != tc.taint.MatchTaint(tc.taintToMatch) {
+			t.Errorf("[%s] expect taint %s match taint %s", tc.description, tc.taint.ToString(), tc.taintToMatch.ToString())
+		}
+	}
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
new file mode 100644
index 00000000..1dfbc9f1
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//TODO: consider making these methods functions, because we don't want helper
+//functions in the k8s.io/api repo.
+
+package core
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by
+// <key,effect,operator,value>; if two tolerations have the same combination, they are considered to match.
+// TODO: uniqueness check for tolerations in api validations.
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
+	return t.Key == tolerationToMatch.Key &&
+		t.Effect == tolerationToMatch.Effect &&
+		t.Operator == tolerationToMatch.Operator &&
+		t.Value == tolerationToMatch.Value
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration_test.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration_test.go
new file mode 100644
index 00000000..ec7a8dec
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration_test.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import "testing"
+
+func TestMatchToleration(t *testing.T) {
+	tolerationSeconds := int64(5)
+	tolerationToMatchSeconds := int64(3)
+	testCases := []struct {
+		description       string
+		toleration        *Toleration
+		tolerationToMatch *Toleration
+		expectMatch       bool
+	}{
+		{
+			description: "two tolerations with the same key,operator,value,effect should match",
+			toleration: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			tolerationToMatch: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			expectMatch: true,
+		},
+		{
+			description: "two tolerations with different keys cannot match",
+			toleration: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			tolerationToMatch: &Toleration{
+				Key:      "different-key",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			expectMatch: false,
+		},
+		{
+			description: "two tolerations with different operators cannot match",
+			toleration: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			tolerationToMatch: &Toleration{
+				Key:      "foo",
+				Operator: "different-operator",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			expectMatch: false,
+		},
+		{
+			description: "two tolerations with different values cannot match",
+			toleration: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			tolerationToMatch: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "different-value",
+				Effect:   TaintEffectNoSchedule,
+			},
+			expectMatch: false,
+		},
+		{
+			description: "two tolerations with different effects cannot match",
+			toleration: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectNoSchedule,
+			},
+			tolerationToMatch: &Toleration{
+				Key:      "foo",
+				Operator: "Exists",
+				Value:    "bar",
+				Effect:   TaintEffectPreferNoSchedule,
+			},
+			expectMatch: false,
+		},
+		{
+			description: "two tolerations with different tolerationSeconds should match",
+			toleration: &Toleration{
+				Key:               "foo",
+				Operator:          "Exists",
+				Value:             "bar",
+				Effect:            TaintEffectNoSchedule,
+				TolerationSeconds: &tolerationSeconds,
+			},
+			tolerationToMatch: &Toleration{
+				Key:               "foo",
+				Operator:          "Exists",
+				Value:             "bar",
+				Effect:            TaintEffectNoSchedule,
+				TolerationSeconds: &tolerationToMatchSeconds,
+			},
+			expectMatch: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		if actual := tc.toleration.MatchToleration(tc.tolerationToMatch); actual != tc.expectMatch {
+			t.Errorf("[%s] expected match: %v, got: %v", tc.description, tc.expectMatch, actual)
+		}
+	}
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
new file mode 100644
index 00000000..8b183deb
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
@@ -0,0 +1,4669 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Common string formats
+// ---------------------
+// Many fields in this API have formatting requirements. The commonly used
+// formats are defined here.
+//
+// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
+// in the C language. This is captured by the following regex:
+//	[A-Za-z_][A-Za-z0-9_]*
+// This defines the format, but not the length restriction, which should be
+// specified at the definition of any field of this type.
+//
+// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
+// to the definition of a "label" in RFCs 1035 and 1123. This is captured
+// by the following regex:
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?
+//
+// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
+// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
+// by the following regex:
+//	[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
+// or more simply:
+//	DNS_LABEL(\.DNS_LABEL)*
+//
+// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
+// conforms to the definition of IANA service name in RFC 6335.
+// It must contain at least one letter [a-z] and may contain only [a-z0-9-].
+// Hyphens ('-') cannot be the leading or trailing character of the string
+// and cannot be adjacent to other hyphens.
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
+type ObjectMeta struct {
+	// Name is unique within a namespace. Name is required when creating resources, although
+	// some resources may allow a client to request the generation of an appropriate name
+	// automatically. Name is primarily intended for creation idempotence and configuration
+	// definition.
+	// +optional
+	Name string
+
+	// GenerateName indicates that the name should be made unique by the server prior to persisting
+	// it.
A non-empty value for the field indicates the name will be made unique (and the name + // returned to the client will be different than the name passed). The value of this field will + // be combined with a unique suffix on the server if the Name field has not been provided. + // The provided value must be valid within the rules for Name, and may be truncated by the length + // of the suffix required to make the value unique on the server. + // + // If this field is specified, and Name is not present, the server will NOT return a 409 if the + // generated name exists - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // +optional + GenerateName string + + // Namespace defines the space within which name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // +optional + Namespace string + + // SelfLink is a URL representing this object. + // +optional + SelfLink string + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // +optional + UID types.UID + + // An opaque value that represents the version of this resource. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and values may only be valid for a particular + // resource or set of resources. Only servers will generate resource versions. + // +optional + ResourceVersion string + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // +optional + CreationTimestamp metav1.Time + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. 
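Editorial note: to make the Name/GenerateName contract described above concrete, here is a hedged sketch of the server-side behavior; the generated suffix value is invented for illustration and NamespaceDefault is the constant defined later in this file.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	meta := core.ObjectMeta{
		GenerateName: "demo-", // server appends a unique suffix when Name is empty
		Namespace:    core.NamespaceDefault,
		Labels:       map[string]string{"app": "demo"},
	}
	if meta.Name == "" && meta.GenerateName != "" {
		meta.Name = meta.GenerateName + "x7k2q" // hypothetical generated suffix
	}
	fmt.Println(meta.Namespace + "/" + meta.Name) // default/demo-x7k2q
}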
+ // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time + + // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion + // was requested. Represents the most recent grace period, and may only be shortened once set. + // +optional + DeletionGracePeriodSeconds *int64 + + // Labels are key value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + // The prefix is optional. If the prefix is not specified, the key is assumed to be private + // to the user. Other system components that wish to use labels must specify a prefix. The + // "kubernetes.io/" prefix is reserved for use by kubernetes components. + // +optional + Labels map[string]string + + // Annotations are unstructured key value data stored with a resource that may be set by + // external tooling. They are not queryable and should be preserved when modifying + // objects. Annotation keys have the same formatting restrictions as Label keys. See the + // comments on Labels for details. + // +optional + Annotations map[string]string + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *metav1.Initializers + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. 
+ NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. 
+ // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
+ // HostPath represents a directory on the host.
+ // Provisioned by a developer or tester.
+ // This is useful for single-node development and testing only!
+ // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+ // +optional
+ HostPath *HostPathVolumeSource
+ // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
+ // +optional
+ Glusterfs *GlusterfsVolumeSource
+ // NFS represents an NFS mount on the host that shares a pod's lifetime
+ // +optional
+ NFS *NFSVolumeSource
+ // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
+ // +optional
+ RBD *RBDPersistentVolumeSource
+ // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ // +optional
+ Quobyte *QuobyteVolumeSource
+ // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // +optional
+ ISCSI *ISCSIPersistentVolumeSource
+ // FlexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec based plugin.
+ // +optional
+ FlexVolume *FlexPersistentVolumeSource
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+ // +optional
+ Cinder *CinderVolumeSource
+ // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ // +optional
+ CephFS *CephFSPersistentVolumeSource
+ // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ // +optional
+ FC *FCVolumeSource
+ // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running
+ // +optional
+ Flocker *FlockerVolumeSource
+ // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // +optional
+ AzureFile *AzureFilePersistentVolumeSource
+ // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
+ // +optional
+ VsphereVolume *VsphereVirtualDiskVolumeSource
+ // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // +optional
+ AzureDisk *AzureDiskVolumeSource
+ // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on the kubelet's host machine
+ PhotonPersistentDisk *PhotonPersistentDiskVolumeSource
+ // PortworxVolume represents a Portworx volume attached and mounted on the kubelet's host machine
+ // +optional
+ PortworxVolume *PortworxVolumeSource
+ // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // +optional
+ ScaleIO *ScaleIOPersistentVolumeSource
+ // Local represents directly-attached storage with node affinity
+ // +optional
+ Local *LocalVolumeSource
+ // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
+ // +optional
+ StorageOS *StorageOSPersistentVolumeSource
+ // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Beta feature).
+ // +optional
+ CSI *CSIPersistentVolumeSource
+}
+
+// PersistentVolumeClaimVolumeSource references the user's PersistentVolumeClaim in the same namespace.
+type PersistentVolumeClaimVolumeSource struct {
+ // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume
+ ClaimName string
+ // Optional: Defaults to false (read/write). ReadOnly here
+ // will force the ReadOnly setting in VolumeMounts
+ // +optional
+ ReadOnly bool
+}
+
+const (
+ // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
+ // It's deprecated and will be removed in a future release. (#51440)
+ BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+ // MountOptionAnnotation defines mount option annotation used in PVs
+ MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
+
+ // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume.
+ // Value is a string of the json representation of type NodeAffinity
+ AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type PersistentVolume struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines a persistent volume owned by the cluster
+ // +optional
+ Spec PersistentVolumeSpec
+
+ // Status represents the current information about the persistent volume.
+ // +optional
+ Status PersistentVolumeStatus
+}
+
+type PersistentVolumeSpec struct {
+ // Capacity represents the actual resources of the volume
+ Capacity ResourceList
+ // Source represents the location and type of a volume to mount.
+ PersistentVolumeSource
+ // AccessModes contains all ways the volume can be mounted
+ // +optional
+ AccessModes []PersistentVolumeAccessMode
+ // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+ // ClaimRef is expected to be non-nil when bound.
+ // claim.VolumeName is the authoritative bind between PV and PVC.
+ // When set to a non-nil value, PVC.Spec.Selector of the referenced PVC is
+ // ignored, i.e. labels of this PV do not need to match PVC selector.
+ // +optional
+ ClaimRef *ObjectReference
+ // Optional: what happens to a persistent volume when released from its claim.
+ // +optional
+ PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy
+ // Name of StorageClass to which this persistent volume belongs. Empty value
+ // means that this volume does not belong to any StorageClass.
+ // +optional
+ StorageClassName string
+ // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+ // simply fail if one is invalid.
+ // +optional
+ MountOptions []string
+ // volumeMode defines if a volume is intended to be used with a formatted filesystem
+ // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+ // This is an alpha feature and may change in the future.
+ // +optional
+ VolumeMode *PersistentVolumeMode
+ // NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+ // This field influences the scheduling of pods that use this volume.
+ // +optional
+ NodeAffinity *VolumeNodeAffinity
+}
+
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+type VolumeNodeAffinity struct {
+ // Required specifies hard node constraints that must be met.
+ Required *NodeSelector
+}
+
+// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
+type PersistentVolumeReclaimPolicy string
+
+const (
+ // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
+ // The volume plugin must support Recycling.
+ // DEPRECATED: The Recycle reclaim policy is deprecated.
See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + // +optional + StorageClassName *string + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. 
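+ // Example (editor's illustration, not upstream text): a minimal claim spec
+ // requesting 1Gi of ReadWriteOnce storage, using resource.MustParse from
+ // k8s.io/apimachinery/pkg/api/resource:
+ //
+ //	spec := PersistentVolumeClaimSpec{
+ //		AccessModes: []PersistentVolumeAccessMode{ReadWriteOnce},
+ //		Resources: ResourceRequirements{
+ //			Requests: ResourceList{ResourceStorage: resource.MustParse("1Gi")},
+ //		},
+ //	}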
+ // This is an alpha feature and may change in the future.
+ // +optional
+ VolumeMode *PersistentVolumeMode
+}
+
+type PersistentVolumeClaimConditionType string
+
+// These are valid conditions of a PVC
+const (
+ // PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started
+ PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+ // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
+ PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+)
+
+type PersistentVolumeClaimCondition struct {
+ Type PersistentVolumeClaimConditionType
+ Status ConditionStatus
+ // +optional
+ LastProbeTime metav1.Time
+ // +optional
+ LastTransitionTime metav1.Time
+ // +optional
+ Reason string
+ // +optional
+ Message string
+}
+
+type PersistentVolumeClaimStatus struct {
+ // Phase represents the current phase of PersistentVolumeClaim
+ // +optional
+ Phase PersistentVolumeClaimPhase
+ // AccessModes contains all ways the volume backing the PVC can be mounted
+ // +optional
+ AccessModes []PersistentVolumeAccessMode
+ // Represents the actual resources of the underlying volume
+ // +optional
+ Capacity ResourceList
+ // +optional
+ Conditions []PersistentVolumeClaimCondition
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+ // can be mounted in read/write mode to exactly 1 host
+ ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+ // can be mounted in read-only mode to many hosts
+ ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+ // can be mounted in read/write mode to many hosts
+ ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+ // used for PersistentVolumes that are not available
+ VolumePending PersistentVolumePhase = "Pending"
+ // used for PersistentVolumes that are not yet bound
+ // Available volumes are held by the binder and matched to PersistentVolumeClaims
+ VolumeAvailable PersistentVolumePhase = "Available"
+ // used for PersistentVolumes that are bound
+ VolumeBound PersistentVolumePhase = "Bound"
+ // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+ // released volumes must be recycled before becoming available again
+ // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+ VolumeReleased PersistentVolumePhase = "Released"
+ // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+ VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+ // used for PersistentVolumeClaims that are not yet bound
+ ClaimPending PersistentVolumeClaimPhase = "Pending"
+ // used for PersistentVolumeClaims that are bound
+ ClaimBound PersistentVolumeClaimPhase = "Bound"
+ // used for PersistentVolumeClaims that lost their underlying
+ // PersistentVolume. The claim was bound to a PersistentVolume and this
+ // volume does not exist any longer and all data on it was lost.
+ ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+type HostPathType string
+
+const (
+ // For backwards compatibility, leave it empty if unset
+ HostPathUnset HostPathType = ""
+ // If nothing exists at the given path, an empty directory will be created there
+ // as needed with file mode 0755, having the same group and ownership with Kubelet.
+ HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" + // A directory must exist at the given path + HostPathDirectory HostPathType = "Directory" + // If nothing exists at the given path, an empty file will be created there + // as needed with file mode 0644, having the same group and ownership with Kubelet. + HostPathFileOrCreate HostPathType = "FileOrCreate" + // A file must exist at the given path + HostPathFile HostPathType = "File" + // A UNIX socket must exist at the given path + HostPathSocket HostPathType = "Socket" + // A character device must exist at the given path + HostPathCharDev HostPathType = "CharDevice" + // A block device must exist at the given path + HostPathBlockDev HostPathType = "BlockDevice" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + // If the path is a symlink, it will follow the link to the real path. + Path string + // Defaults to "" + Type *HostPathType +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // +optional + Medium StorageMedium + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit *resource.Quantity +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. 
+ // If omitted, kubelet will attempt to mount the device name.
+ // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+ // +optional
+ Partition int32
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+ // Required: iSCSI target portal
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ TargetPortal string
+ // Required: target iSCSI Qualified Name
+ // +optional
+ IQN string
+ // Required: iSCSI target lun number
+ // +optional
+ Lun int32
+ // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+ // +optional
+ ISCSIInterface string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: list of iSCSI target portal IPs for high availability.
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ Portals []string
+ // Optional: whether to support iSCSI Discovery CHAP authentication
+ // +optional
+ DiscoveryCHAPAuth bool
+ // Optional: whether to support iSCSI Session CHAP authentication
+ // +optional
+ SessionCHAPAuth bool
+ // Optional: CHAP secret for iSCSI target and initiator authentication.
+ // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Custom initiator name per volume.
+ // If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ InitiatorName *string
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+ // Required: iSCSI target portal
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ TargetPortal string
+ // Required: target iSCSI Qualified Name
+ // +optional
+ IQN string
+ // Required: iSCSI target lun number
+ // +optional
+ Lun int32
+ // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+ // +optional
+ ISCSIInterface string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
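+ // Example (editor's illustration only; the portal, IQN, and lun are made up):
+ //
+ //	iscsi := ISCSIPersistentVolumeSource{
+ //		TargetPortal: "10.0.0.1:3260",
+ //		IQN:          "iqn.2001-04.com.example:storage.disk1",
+ //		Lun:          0,
+ //	}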
+ // Optional: list of iSCSI target portal IPs for high availability.
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ Portals []string
+ // Optional: whether to support iSCSI Discovery CHAP authentication
+ // +optional
+ DiscoveryCHAPAuth bool
+ // Optional: whether to support iSCSI Session CHAP authentication
+ // +optional
+ SessionCHAPAuth bool
+ // Optional: CHAP secret for iSCSI target and initiator authentication.
+ // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Custom initiator name per volume.
+ // If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ InitiatorName *string
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+ // Optional: FC target worldwide names (WWNs)
+ // +optional
+ TargetWWNs []string
+ // Optional: FC target lun number
+ // +optional
+ Lun *int32
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: FC volume World Wide Identifiers (WWIDs)
+ // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously.
+ // +optional
+ WWIDs []string
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ // +optional
+ FSType string
+ // Optional: SecretRef is a reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: Extra driver options if any.
+ // +optional
+ Options map[string]string
+}
+
+// FlexVolumeSource represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ // +optional
+ FSType string
+ // Optional: SecretRef is a reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
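+ // (Editor's sketch, not upstream text: a FlexVolume using a hypothetical
+ // "example.com/lvm" driver with free-form options; the driver name and
+ // options are made up:
+ //
+ //	fv := FlexVolumeSource{
+ //		Driver:  "example.com/lvm",
+ //		Options: map[string]string{"size": "1Gi"},
+ //	})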
+ // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. 
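+// Editor's sketch of the SecretVolumeSource defined above (not upstream text;
+// the secret name, key, and path are made up):
+//
+//	sv := SecretVolumeSource{
+//		SecretName: "tls-certs",
+//		Items:      []KeyToPath{{Key: "tls.crt", Path: "certs/tls.crt"}},
+//	}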
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+type SecretProjection struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+type NFSVolumeSource struct {
+ // Server is the hostname or IP address of the NFS server
+ Server string
+
+ // Path is the exported NFS share
+ Path string
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the NFS export to be mounted with read-only permissions
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+type QuobyteVolumeSource struct {
+ // Registry represents a single or multiple Quobyte Registry services
+ // specified as a string of host:port pairs (multiple entries are separated with commas),
+ // which acts as the central registry for volumes
+ Registry string
+
+ // Volume is a string that references an already created Quobyte volume by name.
+ Volume string
+
+ // Defaults to false (read/write). ReadOnly here will force
+ // the Quobyte volume to be mounted with read-only permissions
+ // +optional
+ ReadOnly bool
+
+ // User to map volume access to
+ // Defaults to the root user
+ // +optional
+ User string
+
+ // Group to map volume access to
+ // Default is no group
+ // +optional
+ Group string
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsVolumeSource struct {
+ // Required: EndpointsName is the endpoint name that details Glusterfs topology
+ EndpointsName string
+
+ // Required: Path is the Glusterfs volume path
+ Path string
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the Glusterfs volume to be mounted with read-only permissions
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDVolumeSource struct {
+ // Required: CephMonitors is a collection of Ceph monitors
+ CephMonitors []string
+ // Required: RBDImage is the rados image name
+ RBDImage string
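+ // Example (editor's illustration; the monitor address and image name are made up):
+ //
+ //	rbd := RBDVolumeSource{
+ //		CephMonitors: []string{"10.16.154.78:6789"},
+ //		RBDImage:     "foo-image",
+ //	}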
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: RBDPool is the rados pool name, default is rbd
+ // +optional
+ RBDPool string
+ // Optional: RadosUser is the rados user name, default is admin
+ // +optional
+ RadosUser string
+ // Optional: Keyring is the path to key ring for RadosUser, default is /etc/ceph/keyring
+ // +optional
+ Keyring string
+ // Optional: SecretRef is the name of the authentication secret for RadosUser, default is nil.
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDPersistentVolumeSource struct {
+ // Required: CephMonitors is a collection of Ceph monitors
+ CephMonitors []string
+ // Required: RBDImage is the rados image name
+ RBDImage string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: RBDPool is the rados pool name, default is rbd
+ // +optional
+ RBDPool string
+ // Optional: RadosUser is the rados user name, default is admin
+ // +optional
+ RadosUser string
+ // Optional: Keyring is the path to key ring for RadosUser, default is /etc/ceph/keyring
+ // +optional
+ Keyring string
+ // Optional: SecretRef is a reference to the authentication secret for RadosUser, default is empty.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a cinder volume resource in OpenStack. A Cinder volume
+// must exist before mounting to a container. The volume must also be
+// in the same region as the kubelet. Cinder volumes support ownership
+// management and SELinux relabeling.
+type CinderVolumeSource struct {
+ // Unique id of the volume used to identify the cinder volume
+ VolumeID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ Path string
+ // Optional: User is the rados user name, default is admin
+ // +optional
+ User string
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // +optional
+ SecretFile string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *LocalObjectReference
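+ // Example (editor's illustration; the monitor address is made up):
+ //
+ //	cephfs := CephFSVolumeSource{
+ //		Monitors: []string{"10.16.154.78:6789"},
+ //		Path:     "/",
+ //	}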
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
+// in any namespace
+type SecretReference struct {
+ // Name is unique within a namespace to reference a secret resource.
+ // +optional
+ Name string
+ // Namespace defines the space within which the secret name must be unique.
+ // +optional
+ Namespace string
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSPersistentVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ Path string
+ // Optional: User is the rados user name, default is admin
+ // +optional
+ User string
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // +optional
+ SecretFile string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+ // Name of the dataset stored as metadata -> name on the dataset for Flocker.
+ // This field is deprecated; use DatasetUUID instead.
+ // +optional
+ DatasetName string
+ // UUID of the dataset. This is the unique identifier of a Flocker dataset
+ // +optional
+ DatasetUUID string
+}
+
+// Represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+ // Items is a list of DownwardAPIVolume file
+ // +optional
+ Items []DownwardAPIVolumeFile
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+}
+
+// Represents a single file containing information from the downward API
+type DownwardAPIVolumeFile struct {
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ Path string
+ // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+ // +optional
+ FieldRef *ObjectFieldSelector
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector
+ // Optional: mode bits to use on this file, must be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32
+}
+
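+// Example (editor's illustration; the path and label key are made up):
+//
+//	f := DownwardAPIVolumeFile{
+//		Path:     "labels/app",
+//		FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels['app']"},
+//	}
+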
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+type DownwardAPIProjection struct {
+ // Items is a list of DownwardAPIVolume file
+ // +optional
+ Items []DownwardAPIVolumeFile
+}
+
+// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+ // the name of the secret that contains Azure Storage Account Name and Key
+ SecretName string
+ // Share Name
+ ShareName string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFilePersistentVolumeSource struct {
+ // the name of the secret that contains Azure Storage Account Name and Key
+ SecretName string
+ // Share Name
+ ShareName string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // the namespace of the secret that contains Azure Storage Account Name and Key
+ // default is the same as the Pod
+ // +optional
+ SecretNamespace *string
+}
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+ // Path that identifies vSphere volume vmdk
+ VolumePath string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Storage Policy Based Management (SPBM) profile name.
+ // +optional
+ StoragePolicyName string
+ // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+ // +optional
+ StoragePolicyID string
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+ // ID that identifies Photon Controller persistent disk
+ PdID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+ // VolumeID uniquely identifies a Portworx volume
+ VolumeID string
+ // FSType represents the filesystem type to mount
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+ AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
+ AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
+ AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+ AzureSharedBlobDisk AzureDataDiskKind = "Shared"
+ AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+ AzureManagedDisk AzureDataDiskKind = "Managed"
+)
+
+// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+ // The Name of the data disk in the blob storage
+ DiskName string
+ // The URI of the data disk in the blob storage
+ DataDiskURI string
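+ // Example (editor's illustration; the disk name and URI are made up):
+ //
+ //	mode := AzureDataDiskCachingReadOnly
+ //	disk := AzureDiskVolumeSource{
+ //		DiskName:    "test.vhd",
+ //		DataDiskURI: "https://someaccount.blob.core.windows.net/vhds/test.vhd",
+ //		CachingMode: &mode,
+ //	}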
+ // Host Caching mode: None, Read Only, Read Write.
+ // +optional
+ CachingMode *AzureDataDiskCachingMode
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType *string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly *bool
+ // Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk per storage account),
+ // Managed (Azure managed data disk; only in managed availability set). Defaults to Shared.
+ Kind *AzureDataDiskKind
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+type ScaleIOVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, Login operation will fail.
+ SecretRef *LocalObjectReference
+ // Flag to enable/disable SSL communication with Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the ScaleIO Protection Domain for the configured storage.
+ // +optional
+ ProtectionDomain string
+ // The ScaleIO Storage Pool associated with the protection domain.
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // +optional
+ StorageMode string
+ // The name of a volume already created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined
+// by an admin via a storage class, for instance.
+type ScaleIOPersistentVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, Login operation will fail.
+ SecretRef *SecretReference
+ // Flag to enable/disable SSL communication with Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the ScaleIO Protection Domain for the configured storage.
+ // +optional
+ ProtectionDomain string
+ // The ScaleIO Storage Pool associated with the protection domain.
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // +optional
+ StorageMode string
+ // The name of a volume created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a StorageOS persistent volume resource.
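+//
+// Example (editor's illustration; the volume and namespace names are made up):
+//
+//	sos := StorageOSVolumeSource{VolumeName: "db-vol", VolumeNamespace: "default"}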
+type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. 
Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool
+}
+
+// Represents a projected volume source.
+type ProjectedVolumeSource struct {
+ // list of volume projections
+ Sources []VolumeProjection
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+}
+
+// Projection that may be projected along with other supported volume types
+type VolumeProjection struct {
+ // all types below are the supported types for projection into the same volume
+
+ // information about the secret data to project
+ Secret *SecretProjection
+ // information about the downwardAPI data to project
+ DownwardAPI *DownwardAPIProjection
+ // information about the configMap data to project
+ ConfigMap *ConfigMapProjection
+}
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+ // The key to project.
+ Key string
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ Path string
+ // Optional: mode bits to use on this file, should be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32
+}
+
+// Local represents directly-attached storage with node affinity
+type LocalVolumeSource struct {
+ // The full path to the volume on the node
+ // For alpha, this path must be a directory
+ // Once block as a source is supported, then this path can point to a block device
+ Path string
+}
+
+// Represents storage that is managed by an external CSI volume driver (Beta feature)
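+//
+// Example (editor's illustration; the driver name and volume handle are made up):
+//
+//	csi := CSIPersistentVolumeSource{
+//		Driver:       "csi.example.com",
+//		VolumeHandle: "existingVolumeName",
+//		FSType:       "ext4",
+//	}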
+type CSIPersistentVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ // Required.
+ Driver string
+
+ // VolumeHandle is the unique volume name returned by the CSI volume
+ // plugin’s CreateVolume to refer to the volume on all subsequent calls.
+ // Required.
+ VolumeHandle string
+
+ // Optional: The value to pass to ControllerPublishVolumeRequest.
+ // Defaults to false (read/write).
+ // +optional
+ ReadOnly bool
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+
+ // Attributes of the volume to publish.
+ // +optional
+ VolumeAttributes map[string]string
+
+ // ControllerPublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerPublishVolume and ControllerUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ ControllerPublishSecretRef *SecretReference
+
+ // NodeStageSecretRef is a reference to the secret object containing sensitive
+ // information to pass to the CSI driver to complete the CSI NodeStageVolume
+ // and NodeUnstageVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ NodeStageSecretRef *SecretReference
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ NodePublishSecretRef *SecretReference
+}
+
+// ContainerPort represents a network port in a single container
+type ContainerPort struct {
+ // Optional: If specified, this must be an IANA_SVC_NAME. Each named port
+ // in a pod must have a unique name.
+ // +optional
+ Name string
+ // Optional: If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // +optional
+ HostPort int32
+ // Required: This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32
+ // Required: Supports "TCP" and "UDP".
+ // +optional
+ Protocol Protocol
+ // Optional: What host IP to bind the external port to.
+ // +optional
+ HostIP string
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+ // Required: This must match the Name of a Volume [above].
+ Name string
+ // Optional: Defaults to false (read-write).
+ // +optional
+ ReadOnly bool
+ // Required. If the path is not an absolute path (e.g. some/path) it
+ // will be prepended with the appropriate root prefix for the operating
+ // system. On Linux this is '/', on Windows this is 'C:\'.
+ MountPath string
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ // +optional
+ SubPath string
+ // mountPropagation determines how mounts are propagated from the host
+ // to container and the other way around.
+ // When not set, MountPropagationHostToContainer is used.
+ // This field is beta in 1.10.
+ // +optional
+ MountPropagation *MountPropagationMode
+}
+
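+// Example (editor's illustration; the name and paths are made up):
+//
+//	mode := MountPropagationHostToContainer
+//	vm := VolumeMount{
+//		Name:             "host-data",
+//		MountPath:        "/data",
+//		ReadOnly:         true,
+//		MountPropagation: &mode,
+//	}
+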
+// MountPropagationMode describes mount propagation.
+type MountPropagationMode string
+
+const (
+ // MountPropagationHostToContainer means that the volume in a container will
+ // receive new mounts from the host or other containers, but filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rslave" in Linux terminology).
+ MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+ // MountPropagationBidirectional means that the volume in a container will
+ // receive new mounts from the host or other containers, and its own mounts
+ // will be propagated from the container to the host or other containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rshared" in Linux terminology).
+ MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
+
+// VolumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+ // name must match the name of a persistentVolumeClaim in the pod
+ Name string
+ // devicePath is the path inside of the container that the device will be mapped to.
+ DevicePath string
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+ // Required: This must be a C_IDENTIFIER.
+ Name string
+ // Optional: no more than one of the following may be specified.
+ // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. The $(VAR_NAME)
+ // syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+ // references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // +optional
+ Value string
+ // Optional: Specifies a source the value of this var should come from.
+ // +optional
+ ValueFrom *EnvVarSource
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+// Only one of its fields may be set.
+type EnvVarSource struct {
+ // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+ // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+ // +optional
+ FieldRef *ObjectFieldSelector
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector
+ // Selects a key of a ConfigMap.
+ // +optional
+ ConfigMapKeyRef *ConfigMapKeySelector
+ // Selects a key of a secret in the pod's namespace.
+ // +optional
+ SecretKeyRef *SecretKeySelector
+}
+
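+// Example (editor's illustration; the variable name is made up):
+//
+//	ev := EnvVar{
+//		Name:      "POD_NAME",
+//		ValueFrom: &EnvVarSource{FieldRef: &ObjectFieldSelector{FieldPath: "metadata.name"}},
+//	}
+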
+	APIVersion string
+	// Required: Path of the field to select in the specified API version
+	FieldPath string
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+type ResourceFieldSelector struct {
+	// Container name: required for volumes, optional for env vars
+	// +optional
+	ContainerName string
+	// Required: resource to select
+	Resource string
+	// Specifies the output format of the exposed resources, defaults to "1"
+	// +optional
+	Divisor resource.Quantity
+}
+
+// ConfigMapKeySelector selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+	// The ConfigMap to select from.
+	LocalObjectReference
+	// The key to select.
+	Key string
+	// Specify whether the ConfigMap or its key must be defined
+	// +optional
+	Optional *bool
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+	// The name of the secret in the pod's namespace to select from.
+	LocalObjectReference
+	// The key of the secret to select from. Must be a valid secret key.
+	Key string
+	// Specify whether the Secret or its key must be defined
+	// +optional
+	Optional *bool
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets.
+type EnvFromSource struct {
+	// An optional identifier to prepend to each key in the ConfigMap.
+	// +optional
+	Prefix string
+	// The ConfigMap to select from.
+	// +optional
+	ConfigMapRef *ConfigMapEnvSource
+	// The Secret to select from.
+	// +optional
+	SecretRef *SecretEnvSource
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+type ConfigMapEnvSource struct {
+	// The ConfigMap to select from.
+	LocalObjectReference
+	// Specify whether the ConfigMap must be defined
+	// +optional
+	Optional *bool
+}
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+type SecretEnvSource struct {
+	// The Secret to select from.
+	LocalObjectReference
+	// Specify whether the Secret must be defined
+	// +optional
+	Optional *bool
+}
+
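+// exampleEnvVar is an illustrative sketch (not part of the vendored API):
+// an environment variable whose value is taken from a key of a hypothetical
+// ConfigMap named "app-config"; all names here are placeholders.
+func exampleEnvVar() EnvVar {
+	optional := true
+	return EnvVar{
+		Name: "LOG_LEVEL",
+		ValueFrom: &EnvVarSource{
+			ConfigMapKeyRef: &ConfigMapKeySelector{
+				LocalObjectReference: LocalObjectReference{Name: "app-config"},
+				Key:                  "log-level",
+				Optional:             &optional,
+			},
+		},
+	}
+}
+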
+// HTTPHeader describes a custom header to be used in HTTP probes
+type HTTPHeader struct {
+	// The header field name
+	Name string
+	// The header field value
+	Value string
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+	// Optional: Path to access on the HTTP server.
+	// +optional
+	Path string
+	// Required: Name or number of the port to access on the container.
+	// +optional
+	Port intstr.IntOrString
+	// Optional: Host name to connect to, defaults to the pod IP. You
+	// probably want to set "Host" in httpHeaders instead.
+	// +optional
+	Host string
+	// Optional: Scheme to use for connecting to the host, defaults to HTTP.
+	// +optional
+	Scheme URIScheme
+	// Optional: Custom headers to set in the request. HTTP allows repeated headers.
+	// +optional
+	HTTPHeaders []HTTPHeader
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+type URIScheme string
+
+const (
+	// URISchemeHTTP means that the scheme used will be http://
+	URISchemeHTTP URIScheme = "HTTP"
+	// URISchemeHTTPS means that the scheme used will be https://
+	URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+	// Required: Port to connect to.
+	// +optional
+	Port intstr.IntOrString
+	// Optional: Host name to connect to, defaults to the pod IP.
+	// +optional
+	Host string
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+	// Command is the command line to execute inside the container, the working directory for the
+	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+	// a shell, you need to explicitly call out to that shell.
+	// +optional
+	Command []string
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+	// The action taken to determine the health of a container
+	Handler
+	// Length of time before health checking is activated. In seconds.
+	// +optional
+	InitialDelaySeconds int32
+	// Length of time before health checking times out. In seconds.
+	// +optional
+	TimeoutSeconds int32
+	// How often (in seconds) to perform the probe.
+	// +optional
+	PeriodSeconds int32
+	// Minimum consecutive successes for the probe to be considered successful after having failed.
+	// Must be 1 for liveness.
+	// +optional
+	SuccessThreshold int32
+	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+	// +optional
+	FailureThreshold int32
+}
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+	PullAlways PullPolicy = "Always"
+	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
+	PullNever PullPolicy = "Never"
+	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+	PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+type TerminationMessagePolicy string
+
+const (
+	// TerminationMessageReadFile is the default behavior and will set the container status message to
+	// the contents of the container's terminationMessagePath when the container exits.
+	TerminationMessageReadFile TerminationMessagePolicy = "File"
+	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+	// for the container status message when the container exits with an error and the
+	// terminationMessagePath has no contents.
+	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represents a POSIX capability type
+type Capability string
+
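+// exampleLivenessProbe is an illustrative sketch (not part of the vendored
+// API) of an HTTP liveness probe built from Probe, Handler and
+// HTTPGetAction; the path and port are hypothetical.
+func exampleLivenessProbe() Probe {
+	return Probe{
+		Handler: Handler{
+			HTTPGet: &HTTPGetAction{
+				Path:   "/healthz",
+				Port:   intstr.FromInt(8080),
+				Scheme: URISchemeHTTP,
+			},
+		},
+		InitialDelaySeconds: 10,
+		PeriodSeconds:       5,
+		FailureThreshold:    3,
+	}
+}
+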
+// Capabilities represent POSIX capabilities that can be added to or removed from a running container.
+type Capabilities struct {
+	// Added capabilities
+	// +optional
+	Add []Capability
+	// Removed capabilities
+	// +optional
+	Drop []Capability
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+	// Limits describes the maximum amount of compute resources allowed.
+	// +optional
+	Limits ResourceList
+	// Requests describes the minimum amount of compute resources required.
+	// If Request is omitted for a container, it defaults to Limits if that is explicitly specified,
+	// otherwise to an implementation-defined value.
+	// +optional
+	Requests ResourceList
+}
+
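+// exampleResources is an illustrative sketch (not part of the vendored API):
+// requests default to limits only when limits are explicitly set. It assumes
+// the ResourceList type and the ResourceCPU/ResourceMemory constants defined
+// elsewhere in this package; the quantities are hypothetical.
+func exampleResources() ResourceRequirements {
+	return ResourceRequirements{
+		Limits: ResourceList{
+			ResourceCPU:    resource.MustParse("500m"),
+			ResourceMemory: resource.MustParse("128Mi"),
+		},
+		Requests: ResourceList{
+			ResourceCPU: resource.MustParse("250m"),
+		},
+	}
+}
+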
+// Container represents a single container that is expected to be run on the host.
+type Container struct {
+	// Required: This must be a DNS_LABEL. Each container in a pod must
+	// have a unique name.
+	Name string
+	// Required.
+	Image string
+	// Optional: The docker image's entrypoint is used if this is not provided; cannot be updated.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+	// can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+	// regardless of whether the variable exists or not.
+	// +optional
+	Command []string
+	// Optional: The docker image's cmd is used if this is not provided; cannot be updated.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+	// can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+	// regardless of whether the variable exists or not.
+	// +optional
+	Args []string
+	// Optional: Defaults to Docker's default.
+	// +optional
+	WorkingDir string
+	// +optional
+	Ports []ContainerPort
+	// List of sources to populate environment variables in the container.
+	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	EnvFrom []EnvFromSource
+	// +optional
+	Env []EnvVar
+	// Compute resource requirements.
+	// +optional
+	Resources ResourceRequirements
+	// +optional
+	VolumeMounts []VolumeMount
+	// volumeDevices is the list of block devices to be used by the container.
+	// This is an alpha feature and may change in the future.
+	// +optional
+	VolumeDevices []VolumeDevice
+	// +optional
+	LivenessProbe *Probe
+	// +optional
+	ReadinessProbe *Probe
+	// +optional
+	Lifecycle *Lifecycle
+	// Required.
+	// +optional
+	TerminationMessagePath string
+	// +optional
+	TerminationMessagePolicy TerminationMessagePolicy
+	// Required: Policy for pulling images for this container
+	ImagePullPolicy PullPolicy
+	// Optional: SecurityContext defines the security options the container should be run with.
+	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+	// +optional
+	SecurityContext *SecurityContext
+
+	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+	// and shouldn't be used for general purpose containers.
+	// +optional
+	Stdin bool
+	// +optional
+	StdinOnce bool
+	// +optional
+	TTY bool
+}
+
+// Handler defines a specific action that should be taken.
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+	// One and only one of the following should be specified.
+	// Exec specifies the action to take.
+	// +optional
+	Exec *ExecAction
+	// HTTPGet specifies the http request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction
+	// TCPSocket specifies an action involving a TCP port.
+	// TODO: implement a realistic TCP lifecycle hook
+	// +optional
+	TCPSocket *TCPSocketAction
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+	// PostStart is called immediately after a container is created. If the handler fails, the container
+	// is terminated and restarted.
+	// +optional
+	PostStart *Handler
+	// PreStop is called immediately before a container is terminated. The reason for termination is
+	// passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated.
+	// +optional
+	PreStop *Handler
+}
+
+// The types below are used by kube_client and api_server.
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue ConditionStatus = "True"
+	ConditionFalse ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+type ContainerStateWaiting struct {
+	// A brief CamelCase string indicating details about why the container is in waiting state.
+	// +optional
+	Reason string
+	// A human-readable message indicating details about why the container is in waiting state.
+	// +optional
+	Message string
+}
+
+type ContainerStateRunning struct {
+	// +optional
+	StartedAt metav1.Time
+}
+
+type ContainerStateTerminated struct {
+	ExitCode int32
+	// +optional
+	Signal int32
+	// +optional
+	Reason string
+	// +optional
+	Message string
+	// +optional
+	StartedAt metav1.Time
+	// +optional
+	FinishedAt metav1.Time
+	// +optional
+	ContainerID string
+}
+
+// ContainerState holds a possible state of a container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+	// +optional
+	Waiting *ContainerStateWaiting
+	// +optional
+	Running *ContainerStateRunning
+	// +optional
+	Terminated *ContainerStateTerminated
+}
+
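+// exampleWaitingState is an illustrative sketch (not part of the vendored
+// API): only one member of ContainerState may be set at a time; the reason
+// and message are hypothetical.
+func exampleWaitingState() ContainerState {
+	return ContainerState{
+		Waiting: &ContainerStateWaiting{
+			Reason:  "ImagePullBackOff",
+			Message: "back-off pulling image",
+		},
+	}
+}
+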
+type ContainerStatus struct {
+	// Each container in a pod must have a unique name.
+	Name string
+	// +optional
+	State ContainerState
+	// +optional
+	LastTerminationState ContainerState
+	// Ready specifies whether the container has passed its readiness check.
+	Ready bool
+	// Note that this is calculated from dead containers. But those containers are subject to
+	// garbage collection. This value will get capped at 5 by GC.
+	RestartCount int32
+	Image string
+	ImageID string
+	// +optional
+	ContainerID string
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+	// PodPending means the pod has been accepted by the system, but one or more of the containers
+	// has not been started. This includes time before being bound to a node, as well as time spent
+	// pulling images onto the host.
+	PodPending PodPhase = "Pending"
+	// PodRunning means the pod has been bound to a node and all of the containers have been started.
+	// At least one container is still running or is in the process of being restarted.
+	PodRunning PodPhase = "Running"
+	// PodSucceeded means that all containers in the pod have voluntarily terminated
+	// with a container exit code of 0, and the system is not going to restart any of these containers.
+	PodSucceeded PodPhase = "Succeeded"
+	// PodFailed means that all containers in the pod have terminated, and at least one container has
+	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+	PodFailed PodPhase = "Failed"
+	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+	// to an error in communicating with the host of the pod.
+	PodUnknown PodPhase = "Unknown"
+)
+
+type PodConditionType string
+
+// These are valid conditions of a pod.
+const (
+	// PodScheduled represents status of the scheduling process for this pod.
+	PodScheduled PodConditionType = "PodScheduled"
+	// PodReady means the pod is able to service requests and should be added to the
+	// load balancing pools of all matching services.
+	PodReady PodConditionType = "Ready"
+	// PodInitialized means that all init containers in the pod have started successfully.
+	PodInitialized PodConditionType = "Initialized"
+	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
+	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
+	PodReasonUnschedulable = "Unschedulable"
+)
+
+type PodCondition struct {
+	Type PodConditionType
+	Status ConditionStatus
+	// +optional
+	LastProbeTime metav1.Time
+	// +optional
+	LastTransitionTime metav1.Time
+	// +optional
+	Reason string
+	// +optional
+	Message string
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+	RestartPolicyAlways RestartPolicy = "Always"
+	RestartPolicyOnFailure RestartPolicy = "OnFailure"
+	RestartPolicyNever RestartPolicy = "Never"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodList is a list of Pods.
+type PodList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Pod
+}
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+	// first, if it is available, then fall back on the default
+	// (as determined by kubelet) DNS settings.
+	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+	// DNSClusterFirst indicates that the pod should use cluster DNS
+	// first, if it is available and hostNetwork is not set, and otherwise
+	// fall back on the default (as determined by kubelet) DNS settings.
+	DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+	// DNSDefault indicates that the pod should use the default (as
+	// determined by kubelet) DNS settings.
+	DNSDefault DNSPolicy = "Default"
+
+	// DNSNone indicates that the pod should use empty DNS settings. DNS
+	// parameters such as nameservers and search paths should be defined via
+	// DNSConfig.
+	DNSNone DNSPolicy = "None"
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+	// Required. A list of node selector terms. The terms are ORed.
+	NodeSelectorTerms []NodeSelectorTerm
+}
+
+// A null or empty node selector term matches no objects.
+type NodeSelectorTerm struct {
+	// Required. A list of node selector requirements. The requirements are ANDed.
+	MatchExpressions []NodeSelectorRequirement
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+	// The label key that the selector applies to.
+	Key string
+	// Represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+	Operator NodeSelectorOperator
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. If the operator is Gt or Lt, the values
+	// array must have a single element, which will be interpreted as an integer.
+	// This array is replaced during a strategic merge patch.
+	// +optional
+	Values []string
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt NodeSelectorOperator = "Lt"
+)
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+	// Describes node affinity scheduling rules for the pod.
+	// +optional
+	NodeAffinity *NodeAffinity
+	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAffinity *PodAffinity
+	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAntiAffinity *PodAntiAffinity
+}
+
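+// exampleNodeSelector is an illustrative sketch (not part of the vendored
+// API): terms are ORed while the requirements inside a term are ANDed; the
+// label key and values are hypothetical.
+func exampleNodeSelector() NodeSelector {
+	return NodeSelector{
+		NodeSelectorTerms: []NodeSelectorTerm{{
+			MatchExpressions: []NodeSelectorRequirement{{
+				Key:      "failure-domain.example/zone",
+				Operator: NodeSelectorOpIn,
+				Values:   []string{"zone-a", "zone-b"},
+			}},
+		}},
+	}
+}
+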
+// PodAffinity is a group of inter-pod affinity scheduling rules.
+type PodAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// PodAntiAffinity is a group of inter-pod anti-affinity scheduling rules.
+type PodAntiAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the anti-affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling anti-affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+type WeightedPodAffinityTerm struct {
+	// weight associated with matching the corresponding podAffinityTerm,
+	// in the range 1-100.
+	Weight int32
+	// Required. A pod affinity term, associated with the corresponding weight.
+	PodAffinityTerm PodAffinityTerm
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+	// A label query over a set of resources, in this case pods.
+	// +optional
+	LabelSelector *metav1.LabelSelector
+	// namespaces specifies which namespaces the labelSelector applies to (matches against);
+	// null or empty list means "this pod's namespace"
+	// +optional
+	Namespaces []string
+	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
+	// whose value of the label with key topologyKey matches that of any node on which any of the
+	// selected pods is running.
+	// Empty topologyKey is not allowed.
+	TopologyKey string
+}
+
+// NodeAffinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// will try to eventually evict the pod from its node.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// may or may not try to eventually evict the pod from its node.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node matches the corresponding matchExpressions; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm
+}
+
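+// examplePodAntiAffinityTerm is an illustrative sketch (not part of the
+// vendored API): spread pods carrying the hypothetical label app=web across
+// nodes, keyed on the node hostname label.
+func examplePodAntiAffinityTerm() PodAffinityTerm {
+	return PodAffinityTerm{
+		LabelSelector: &metav1.LabelSelector{
+			MatchLabels: map[string]string{"app": "web"},
+		},
+		TopologyKey: "kubernetes.io/hostname",
+	}
+}
+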
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+	Weight int32
+	// A node selector term, associated with the corresponding weight.
+	Preference NodeSelectorTerm
+}
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+	// Required. The taint key to be applied to a node.
+	Key string
+	// Required. The taint value corresponding to the taint key.
+	// +optional
+	Value string
+	// Required. The effect of the taint on pods
+	// that do not tolerate the taint.
+	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+	Effect TaintEffect
+	// TimeAdded represents the time at which the taint was added.
+	// It is only written for NoExecute taints.
+	// +optional
+	TimeAdded *metav1.Time
+}
+
+type TaintEffect string
+
+const (
+	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
+	// but allow all pods submitted to Kubelet without going through the scheduler
+	// to start, and allow all already-running pods to continue running.
+	// Enforced by the scheduler.
+	TaintEffectNoSchedule TaintEffect = "NoSchedule"
+	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+	// new pods onto the node, rather than prohibiting new pods from scheduling
+	// onto the node entirely. Enforced by the scheduler.
+	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+	// Kubelet without going through the scheduler to start.
+	// Enforced by Kubelet and the scheduler.
+	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+	// Evict any already-running pods that do not tolerate the taint.
+	// Currently enforced by NodeController.
+	TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
+	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+	// +optional
+	Key string
+	// Operator represents a key's relationship to the value.
+	// Valid operators are Exists and Equal. Defaults to Equal.
+	// Exists is equivalent to wildcard for value, so that a pod can
+	// tolerate all taints of a particular category.
+	// +optional
+	Operator TolerationOperator
+	// Value is the taint value the toleration matches to.
+	// If the operator is Exists, the value should be empty, otherwise just a regular string.
+	// +optional
+	Value string
+	// Effect indicates the taint effect to match. Empty means match all taint effects.
+	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+	// +optional
+	Effect TaintEffect
+	// TolerationSeconds represents the period of time the toleration (which must be
+	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+	// it is not set, which means tolerate the taint forever (do not evict). Zero and
+	// negative values will be treated as 0 (evict immediately) by the system.
+	// +optional
+	TolerationSeconds *int64
+}
+
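+// exampleToleration is an illustrative sketch (not part of the vendored
+// API): tolerate a hypothetical "dedicated=gpu" NoExecute taint for five
+// minutes before eviction.
+func exampleToleration() Toleration {
+	seconds := int64(300)
+	return Toleration{
+		Key:               "dedicated",
+		Operator:          TolerationOpEqual,
+		Value:             "gpu",
+		Effect:            TaintEffectNoExecute,
+		TolerationSeconds: &seconds,
+	}
+}
+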
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+	TolerationOpExists TolerationOperator = "Exists"
+	TolerationOpEqual TolerationOperator = "Equal"
+)
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+	Volumes []Volume
+	// List of initialization containers belonging to the pod.
+	InitContainers []Container
+	// List of containers belonging to the pod.
+	Containers []Container
+	// +optional
+	RestartPolicy RestartPolicy
+	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+	// Value must be a non-negative integer. The value zero indicates delete immediately.
+	// If this value is nil, the default grace period will be used instead.
+	// The grace period is the duration in seconds after the processes running in the pod are sent
+	// a termination signal and the time when the processes are forcibly halted with a kill signal.
+	// Set this value longer than the expected cleanup time for your process.
+	// +optional
+	TerminationGracePeriodSeconds *int64
+	// Optional duration in seconds relative to the StartTime that the pod may be active on a node
+	// before the system actively tries to terminate the pod; value must be a positive integer.
+	// +optional
+	ActiveDeadlineSeconds *int64
+	// Set DNS policy for the pod.
+	// Defaults to "ClusterFirst".
+	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+	// To have DNS options set along with hostNetwork, you have to specify DNS policy
+	// explicitly to 'ClusterFirstWithHostNet'.
+	// +optional
+	DNSPolicy DNSPolicy
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// +optional
+	NodeSelector map[string]string
+
+	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+	// The pod will be allowed to use secrets referenced by the ServiceAccount.
+	ServiceAccountName string
+	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+	// +optional
+	AutomountServiceAccountToken *bool
+
+	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
+	// requirements.
+	// +optional
+	NodeName string
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// Optional: Defaults to empty. See type description for default values of each field.
+	// +optional
+	SecurityContext *PodSecurityContext
+	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+	// in the case of docker, only DockerConfig type secrets are honored.
+	// +optional
+	ImagePullSecrets []LocalObjectReference
+	// Specifies the hostname of the Pod.
+	// If not specified, the pod's hostname will be set to a system-defined value.
+	// +optional
+	Hostname string
+	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+	// If not specified, the pod will not have a domainname at all.
+	// +optional
+	Subdomain string
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *Affinity
+	// If specified, the pod will be dispatched by the specified scheduler.
+	// If not specified, the pod will be dispatched by the default scheduler.
+	// +optional
+	SchedulerName string
+	// If specified, the pod's tolerations.
+	// +optional
+	Tolerations []Toleration
+	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+	// file if specified. This is only valid for non-hostNetwork pods.
+	// +optional
+	HostAliases []HostAlias
+	// If specified, indicates the pod's priority. "system-node-critical" and
+	// "system-cluster-critical" are two special keywords which indicate the
+	// highest priorities with the former being the highest priority. Any other
+	// name must be defined by creating a PriorityClass object with that name.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName string
+	// The priority value. Various system components use this field to find the
+	// priority of the pod. When Priority Admission Controller is enabled, it
+	// prevents users from setting this field. The admission controller populates
+	// this field from PriorityClassName.
+	// The higher the value, the higher the priority.
+	// +optional
+	Priority *int32
+	// Specifies the DNS parameters of a pod.
+	// Parameters specified here will be merged to the generated DNS
+	// configuration based on DNSPolicy.
+	// +optional
+	DNSConfig *PodDNSConfig
+}
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+type HostAlias struct {
+	IP string
+	Hostnames []string
+}
+
+// Sysctl defines a kernel parameter to be set
+type Sysctl struct {
+	// Name of a property to set
+	Name string
+	// Value of a property to set
+	Value string
+}
+
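+// examplePodSpec is an illustrative sketch (not part of the vendored API)
+// of the minimal fields a pod needs; the container name and image are
+// hypothetical placeholders.
+func examplePodSpec() PodSpec {
+	grace := int64(30)
+	return PodSpec{
+		Containers: []Container{{
+			Name:            "app",
+			Image:           "example.com/app:1.0",
+			ImagePullPolicy: PullIfNotPresent,
+		}},
+		RestartPolicy:                 RestartPolicyAlways,
+		DNSPolicy:                     DNSClusterFirst,
+		TerminationGracePeriodSeconds: &grace,
+	}
+}
+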
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+	// Use the host's network namespace. If this option is set, the ports that will be
+	// used must be specified.
+	// Optional: Defaults to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostNetwork bool
+	// Use the host's pid namespace.
+	// Optional: Defaults to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostPID bool
+	// Use the host's ipc namespace.
+	// Optional: Defaults to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostIPC bool
+	// Share a single process namespace between all of the containers in a pod.
+	// When this is set containers will be able to view and signal processes from other containers
+	// in the same pod, and the first process in each container will not be assigned PID 1.
+	// HostPID and ShareProcessNamespace cannot both be set.
+	// Optional: Defaults to false.
+	// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
+	// +k8s:conversion-gen=false
+	// +optional
+	ShareProcessNamespace *bool
+	// The SELinux context to be applied to all containers.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container. May also be set in SecurityContext. If set in
+	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+	// takes precedence for that container.
+	// +optional
+	SELinuxOptions *SELinuxOptions
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in SecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// +optional
+	RunAsUser *int64
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in SecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// +optional
+	RunAsGroup *int64
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in SecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// +optional
+	RunAsNonRoot *bool
+	// A list of groups applied to the first process run in each container, in addition
+	// to the container's primary GID. If unspecified, no groups will be added to
+	// any container.
+	// +optional
+	SupplementalGroups []int64
+	// A special supplemental group that applies to all containers in a pod.
+	// Some volume types allow the Kubelet to change the ownership of that volume
+	// to be owned by the pod:
+	//
+	// 1. The owning GID will be the FSGroup
+	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+	// 3. The permission bits are OR'd with rw-rw----
+	//
+	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
+	// +optional
+	FSGroup *int64
+}
+
+// PodQOSClass defines the supported QoS classes of Pods.
+type PodQOSClass string
+
+const (
+	// PodQOSGuaranteed is the Guaranteed QoS class.
+	PodQOSGuaranteed PodQOSClass = "Guaranteed"
+	// PodQOSBurstable is the Burstable QoS class.
+	PodQOSBurstable PodQOSClass = "Burstable"
+	// PodQOSBestEffort is the BestEffort QoS class.
+	PodQOSBestEffort PodQOSClass = "BestEffort"
+)
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+type PodDNSConfig struct {
+	// A list of DNS name server IP addresses.
+	// This will be appended to the base nameservers generated from DNSPolicy.
+	// Duplicated nameservers will be removed.
+	// +optional
+	Nameservers []string
+	// A list of DNS search domains for host-name lookup.
+	// This will be appended to the base search paths generated from DNSPolicy.
+	// Duplicated search paths will be removed.
+	// +optional
+	Searches []string
+	// A list of DNS resolver options.
+	// This will be merged with the base options generated from DNSPolicy.
+	// Duplicated entries will be removed. Resolution options given in Options
+	// will override those that appear in the base DNSPolicy.
+	// +optional
+	Options []PodDNSConfigOption
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+	// Required.
+	Name string
+	// +optional
+	Value *string
+}
+
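+// exampleDNSConfig is an illustrative sketch (not part of the vendored API):
+// resolver settings typically paired with DNSNone; all values are
+// hypothetical.
+func exampleDNSConfig() PodDNSConfig {
+	ndots := "2"
+	return PodDNSConfig{
+		Nameservers: []string{"1.2.3.4"},
+		Searches:    []string{"ns1.svc.cluster.example"},
+		Options:     []PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
+	}
+}
+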
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+type PodStatus struct {
+	// +optional
+	Phase PodPhase
+	// +optional
+	Conditions []PodCondition
+	// A human readable message indicating details about why the pod is in this state.
+	// +optional
+	Message string
+	// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
+	// +optional
+	Reason string
+	// nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
+	// scheduled right away as preemption victims receive their graceful termination periods.
+	// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+	// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+	// give the resources on this node to a higher priority pod that is created after preemption.
+	// +optional
+	NominatedNodeName string
+
+	// +optional
+	HostIP string
+	// +optional
+	PodIP string
+
+	// Date and time at which the object was acknowledged by the Kubelet.
+	// This is before the Kubelet pulled the container image(s) for the pod.
+	// +optional
+	StartTime *metav1.Time
+	// +optional
+	QOSClass PodQOSClass
+
+	// The list has one entry per init container in the manifest. The most recent successful
+	// init container will have ready = true; the most recently started container will have
+	// startTime set.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+	InitContainerStatuses []ContainerStatus
+	// The list has one entry per container in the manifest. Each entry is
+	// currently the output of `docker inspect`. This output format is *not*
+	// final and should not be relied upon.
+	// TODO: Make real decisions about what our info should look like. Re-enable fuzz test
+	// when we have done this.
+	// +optional
+	ContainerStatuses []ContainerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+type PodStatusResult struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+	// Status represents the current information about a pod. This data may not be up
+	// to date.
+	// +optional
+	Status PodStatus
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
+type Pod struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of a pod.
+	// +optional
+	Spec PodSpec
+
+	// Status represents the current information about a pod. This data may not be up
+	// to date.
+	// +optional
+	Status PodStatus
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+	// Metadata of the pods created from this template.
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of a pod.
+	// +optional
+	Spec PodSpec
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Template defines the pods that will be created from this pod template
+	// +optional
+	Template PodTemplateSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []PodTemplate
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+// As the internal representation of a replication controller, it may have either
+// a TemplateRef or a Template set.
+type ReplicationControllerSpec struct {
+	// Replicas is the number of desired replicas.
+	Replicas int32
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its containers crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32
+
+	// Selector is a label query over pods that should match the Replicas count.
+	Selector map[string]string
+
+	// TemplateRef is a reference to an object that describes the pod that will be created if
+	// insufficient replicas are detected. This reference is ignored if a Template is set.
+	// Must be set before converting to a versioned API object.
+	// +optional
+	// TemplateRef *ObjectReference
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. Internally, this takes precedence over a
+	// TemplateRef.
+	// +optional
+	Template *PodTemplateSpec
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+	// Replicas is the number of actual replicas.
+	Replicas int32
+
+	// The number of pods that have labels matching the labels of the pod template of the replication controller.
+	// +optional
+	FullyLabeledReplicas int32
+
+	// The number of ready replicas for this replication controller.
+	// +optional
+	ReadyReplicas int32
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+	// +optional
+	AvailableReplicas int32
+
+	// ObservedGeneration is the most recent generation observed by the controller.
+	// +optional
+	ObservedGeneration int64
+
+	// Represents the latest available observations of a replication controller's current state.
+	// +optional
+	Conditions []ReplicationControllerCondition
+}
+
+type ReplicationControllerConditionType string
+
+// These are valid conditions of a replication controller.
+const (
+	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+	// etc., or deleted due to kubelet being down or finalizers failing.
+	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
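+// exampleRCSpec is an illustrative sketch (not part of the vendored API):
+// the selector must match the labels of the supplied pod template; the
+// label values are hypothetical.
+func exampleRCSpec(template *PodTemplateSpec) ReplicationControllerSpec {
+	return ReplicationControllerSpec{
+		Replicas: 3,
+		Selector: map[string]string{"app": "web"},
+		Template: template,
+	}
+}
+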
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+	// Type of replication controller condition.
+	Type ReplicationControllerConditionType
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus
+	// The last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationController represents the configuration of a replication controller.
+type ReplicationController struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the desired behavior of this replication controller.
+	// +optional
+	Spec ReplicationControllerSpec
+
+	// Status is the current status of this replication controller. This data may be
+	// out of date by some window of time.
+	// +optional
+	Status ReplicationControllerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []ReplicationController
+}
+
+const (
+	// ClusterIPNone - do not assign a cluster IP;
+	// no proxying required and no environment variables should be created for pods
+	ClusterIPNone = "None"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Service
+}
+
+// ServiceAffinity is the session affinity type string.
+type ServiceAffinity string
+
+const (
+	// ServiceAffinityClientIP is Client IP based session affinity.
+	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+	// ServiceAffinityNone - no session affinity.
+	ServiceAffinityNone ServiceAffinity = "None"
+)
+
+const (
+	// DefaultClientIPServiceAffinitySeconds is the default timeout seconds
+	// of Client IP based session affinity - 3 hours.
+	DefaultClientIPServiceAffinitySeconds int32 = 10800
+	// MaxClientIPServiceAffinitySeconds is the max timeout seconds
+	// of Client IP based session affinity - 1 day.
+	MaxClientIPServiceAffinitySeconds int32 = 86400
+)
+
+// SessionAffinityConfig represents the configurations of session affinity.
+type SessionAffinityConfig struct {
+	// clientIP contains the configurations of Client IP based session affinity.
+	// +optional
+	ClientIP *ClientIPConfig
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+type ClientIPConfig struct {
+	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+	// The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
+	// Default value is 10800 (for 3 hours).
+	// +optional
+	TimeoutSeconds *int32
+}
+
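+// exampleSessionAffinity is an illustrative sketch (not part of the vendored
+// API): ClientIP session affinity with a one-hour sticky window instead of
+// the 3-hour default.
+func exampleSessionAffinity() SessionAffinityConfig {
+	timeout := int32(3600)
+	return SessionAffinityConfig{
+		ClientIP: &ClientIPConfig{TimeoutSeconds: &timeout},
+	}
+}
+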
+// Service Type string describes ingress methods for a service
+type ServiceType string
+
+const (
+	// ServiceTypeClusterIP means a service will only be accessible inside the
+	// cluster, via the ClusterIP.
+	ServiceTypeClusterIP ServiceType = "ClusterIP"
+
+	// ServiceTypeNodePort means a service will be exposed on one port of
+	// every node, in addition to 'ClusterIP' type.
+	ServiceTypeNodePort ServiceType = "NodePort"
+
+	// ServiceTypeLoadBalancer means a service will be exposed via an
+	// external load balancer (if the cloud provider supports it), in addition
+	// to 'NodePort' type.
+	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
+
+	// ServiceTypeExternalName means a service consists of only a reference to
+	// an external name that kubedns or equivalent will return as a CNAME
+	// record, with no exposing or proxying of any pods involved.
+	ServiceTypeExternalName ServiceType = "ExternalName"
+)
+
+// Service External Traffic Policy Type string
+type ServiceExternalTrafficPolicyType string
+
+const (
+	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
+	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
+	// ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior.
+	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
+)
+
+// ServiceStatus represents the current status of a service
+type ServiceStatus struct {
+	// LoadBalancer contains the current status of the load-balancer,
+	// if one is present.
+	// +optional
+	LoadBalancer LoadBalancerStatus
+}
+
+// LoadBalancerStatus represents the status of a load-balancer
+type LoadBalancerStatus struct {
+	// Ingress is a list containing ingress points for the load-balancer;
+	// traffic intended for the service should be sent to these ingress points.
+	// +optional
+	Ingress []LoadBalancerIngress
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+type LoadBalancerIngress struct {
+	// IP is set for load-balancer ingress points that are IP based
+	// (typically GCE or OpenStack load-balancers)
+	// +optional
+	IP string
+
+	// Hostname is set for load-balancer ingress points that are DNS based
+	// (typically AWS load-balancers)
+	// +optional
+	Hostname string
+}
+
+// ServiceSpec describes the attributes that a user creates on a service
+type ServiceSpec struct {
+	// Type determines how the Service is exposed. Defaults to ClusterIP. Valid
+	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+	// "ExternalName" maps to the specified externalName.
+	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
+	// endpoints. Endpoints are determined by the selector or if that is not
+	// specified, by manual construction of an Endpoints object. If clusterIP is
+	// "None", no virtual IP is allocated and the endpoints are published as a
+	// set of endpoints rather than a stable IP.
+	// "NodePort" builds on ClusterIP and allocates a port on every node which
+	// routes to the clusterIP.
+	// "LoadBalancer" builds on NodePort and creates an
+	// external load-balancer (if supported in the current cloud) which routes
+	// to the clusterIP.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
+	// +optional
+	Type ServiceType
+
+	// Required: The list of ports that are exposed by this service.
+	Ports []ServicePort
+
+	// Route service traffic to pods with label keys and values matching this
+	// selector. If empty or not present, the service is assumed to have an
+	// external process managing its endpoints, which Kubernetes will not
+	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+	// Ignored if type is ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
+	Selector map[string]string
+
+	// ClusterIP is the IP address of the service and is usually assigned
+	// randomly by the master. If an address is specified manually and is not in
+	// use by others, it will be allocated to the service; otherwise, creation
+	// of the service will fail. This field cannot be changed through updates.
+	// Valid values are "None", empty string (""), or a valid IP address. "None"
+	// can be specified for headless services when proxying is not required.
+	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+	// type is ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +optional
+	ClusterIP string
+
+	// ExternalName is the external reference that kubedns or equivalent will
+	// return as a CNAME record for this service. No proxying will be involved.
+	// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+	// and requires Type to be ExternalName.
+	ExternalName string
+
+	// ExternalIPs are used by external load balancers, or can be set by
+	// users to handle external traffic that arrives at a node.
+	// +optional
+	ExternalIPs []string
+
+	// Only applies to Service Type: LoadBalancer
+	// LoadBalancer will get created with the IP specified in this field.
+	// This feature depends on whether the underlying cloud-provider supports specifying
+	// the loadBalancerIP when a load balancer is created.
+	// This field will be ignored if the cloud-provider does not support the feature.
+	// +optional
+	LoadBalancerIP string
+
+	// Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
+	// +optional
+	SessionAffinity ServiceAffinity
+
+	// sessionAffinityConfig contains the configurations of session affinity.
+	// +optional
+	SessionAffinityConfig *SessionAffinityConfig
+
+	// Optional: If specified and supported by the platform, traffic through the
+	// cloud-provider load-balancer will be restricted to the specified client IPs.
+	// This field will be ignored if the cloud-provider does not support the feature.
+	// +optional
+	LoadBalancerSourceRanges []string
+
+	// externalTrafficPolicy denotes if this Service desires to route external
+	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
+	// client source IP and avoids a second hop for LoadBalancer and Nodeport
+	// type services, but risks potentially imbalanced traffic spreading.
+	// "Cluster" obscures the client source IP and may cause a second hop to
+	// another node, but should have good overall load-spreading.
+	// +optional
+	ExternalTrafficPolicy ServiceExternalTrafficPolicyType
+
+	// healthCheckNodePort specifies the healthcheck nodePort for the service.
+	// If not specified, HealthCheckNodePort is created by the service api
+	// backend with the allocated nodePort. Will use user-specified nodePort value
+	// if specified by the client. Only has effect when Type is set to LoadBalancer
+	// and ExternalTrafficPolicy is set to Local.
+	// +optional
+	HealthCheckNodePort int32
+
+	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
+	// must publish the notReadyAddresses of subsets for the Endpoints associated with
+	// the Service. The default value is false.
+	// The primary use case for setting this field is to use a StatefulSet's Headless Service
+	// to propagate SRV records for its Pods without respect to their readiness for the purpose
+	// of peer discovery.
+
+type ServicePort struct {
+	// Optional if only one ServicePort is defined on this service: The
+	// name of this port within the service. This must be a DNS_LABEL.
+	// All ports within a ServiceSpec must have unique names. This maps to
+	// the 'Name' field in EndpointPort objects.
+	Name string
+
+	// The IP protocol for this port. Supports "TCP" and "UDP".
+	Protocol Protocol
+
+	// The port that will be exposed on the service.
+	Port int32
+
+	// Optional: The target port on pods selected by this service. If this
+	// is a string, it will be looked up as a named port in the target
+	// Pod's container ports. If this is not specified, the value
+	// of the 'port' field is used (an identity map).
+	// This field is ignored for services with clusterIP=None, and should be
+	// omitted or set equal to the 'port' field.
+	TargetPort intstr.IntOrString
+
+	// The port on each node on which this service is exposed.
+	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
+	NodePort int32
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Service is a named abstraction of a software service (for example, mysql), consisting of a local
+// port (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+type Service struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of a service.
+	// +optional
+	Spec ServiceSpec
+
+	// Status represents the current status of a service.
+	// +optional
+	Status ServiceStatus
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Secrets is the list of secrets allowed to be used by pods running as this ServiceAccount
+	Secrets []ObjectReference
+
+	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+	// +optional
+	ImagePullSecrets []LocalObjectReference
+
+	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+	// Can be overridden at the pod level.
+	// +optional
+	AutomountServiceAccountToken *bool
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []ServiceAccount
+}
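+
+// An illustrative sketch (editorial note, not upstream code): the TargetPort
+// lookup described above lets a ServicePort reference a named container port,
+// so the Service keeps working even if the container's port number changes.
+// The names and numbers here are examples only.
+//
+//	port := ServicePort{
+//		Name:       "http",
+//		Protocol:   "TCP",
+//		Port:       80,
+//		TargetPort: intstr.FromString("http-web"), // resolved against the target pod's named ports
+//	}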
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//   Name: "mysvc",
+//   Subsets: [
+//     {
+//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//     },
+//     {
+//       Addresses: [{"ip": "10.10.3.3"}],
+//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+//     },
+//   ]
+type Endpoints struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// The set of all endpoints is the union of all subsets.
+	Subsets []EndpointSubset
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//   {
+//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//     Ports:     [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//   }
+// The resulting set of endpoints can be viewed as:
+//     a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//     b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+	Addresses         []EndpointAddress
+	NotReadyAddresses []EndpointAddress
+	Ports             []EndpointPort
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+	// The IP of this endpoint.
+	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
+	// kubernetes components, like kube-proxy, are not IPv6 ready.
+	// TODO: This should allow hostname or IP, see #4447.
+	IP string
+	// Optional: Hostname of this endpoint
+	// Meant to be used by DNS servers etc.
+	// +optional
+	Hostname string
+	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+	// +optional
+	NodeName *string
+	// Optional: The kubernetes object related to the entry point.
+	TargetRef *ObjectReference
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+	// The name of this port (corresponds to ServicePort.Name). Optional
+	// if only one port is defined. Must be a DNS_LABEL.
+	Name string
+
+	// The port number.
+	Port int32
+
+	// The IP protocol for this port.
+	Protocol Protocol
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Endpoints
+}
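+
+// An illustrative sketch (editorial note, not upstream code): the Cartesian
+// expansion of an EndpointSubset documented above could be computed with a
+// helper like the hypothetical expandSubset below (fmt assumed imported).
+//
+//	func expandSubset(ss EndpointSubset) map[string][]string {
+//		eps := map[string][]string{}
+//		for _, p := range ss.Ports {
+//			for _, a := range ss.Addresses {
+//				eps[p.Name] = append(eps[p.Name], fmt.Sprintf("%s:%d", a.IP, p.Port))
+//			}
+//		}
+//		return eps // e.g. {"a": [10.10.1.1:8675, 10.10.2.2:8675], "b": [10.10.1.1:309, ...]}
+//	}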
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+	// PodCIDR represents the pod IP range assigned to the node
+	// Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
+	// +optional
+	PodCIDR string
+
+	// External ID of the node assigned by some machine database (e.g. a cloud provider)
+	// +optional
+	ExternalID string
+
+	// ID of the node assigned by the cloud provider
+	// Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
+	// +optional
+	ProviderID string
+
+	// Unschedulable controls node schedulability of new pods. By default node is schedulable.
+	// +optional
+	Unschedulable bool
+
+	// If specified, the node's taints.
+	// +optional
+	Taints []Taint
+
+	// If specified, the source to get node configuration from.
+	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+	// +optional
+	ConfigSource *NodeConfigSource
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
+type NodeConfigSource struct {
+	metav1.TypeMeta
+	ConfigMapRef *ObjectReference
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+type DaemonEndpoint struct {
+	/*
+		The port tag was not properly in quotes in earlier releases, so it must be
+		uppercase for backwards compatibility (since it was falling back to var name of
+		'Port').
+	*/
+
+	// Port number of the given endpoint.
+	Port int32
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+type NodeDaemonEndpoints struct {
+	// Endpoint on which Kubelet is listening.
+	// +optional
+	KubeletEndpoint DaemonEndpoint
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+type NodeSystemInfo struct {
+	// MachineID reported by the node. For unique machine identification
+	// in the cluster this field is preferred. Learn more from man(5)
+	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+	MachineID string
+	// SystemUUID reported by the node. For unique machine identification
+	// MachineID is preferred. This field is specific to Red Hat hosts
+	// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
+	SystemUUID string
+	// Boot ID reported by the node.
+	BootID string
+	// Kernel Version reported by the node.
+	KernelVersion string
+	// OS Image reported by the node.
+	OSImage string
+	// ContainerRuntime Version reported by the node.
+	ContainerRuntimeVersion string
+	// Kubelet Version reported by the node.
+	KubeletVersion string
+	// KubeProxy Version reported by the node.
+	KubeProxyVersion string
+	// The Operating System reported by the node
+	OperatingSystem string
+	// The Architecture reported by the node
+	Architecture string
+}
+
+// NodeStatus is information about the current status of a node.
+type NodeStatus struct {
+	// Capacity represents the total resources of a node.
+	// +optional
+	Capacity ResourceList
+	// Allocatable represents the resources of a node that are available for scheduling.
+	// +optional
+	Allocatable ResourceList
+	// NodePhase is the current lifecycle phase of the node.
+	// +optional
+	Phase NodePhase
+	// Conditions is an array of current node conditions.
+	// +optional
+	Conditions []NodeCondition
+	// Queried from cloud provider, if available.
+	// +optional
+	Addresses []NodeAddress
+	// Endpoints of daemons running on the Node.
+	// +optional
+	DaemonEndpoints NodeDaemonEndpoints
+	// Set of ids/uuids to uniquely identify the node.
+	// +optional
+	NodeInfo NodeSystemInfo
+	// List of container images on this node
+	// +optional
+	Images []ContainerImage
+	// List of attachable volumes in use (mounted) by the node.
+	// +optional
+	VolumesInUse []UniqueVolumeName
+	// List of volumes that are attached to the node.
+	// +optional
+	VolumesAttached []AttachedVolume
+}
+
+type UniqueVolumeName string
+
+// AttachedVolume describes a volume attached to a node
+type AttachedVolume struct {
+	// Name of the attached volume
+	Name UniqueVolumeName
+
+	// DevicePath represents the device path where the volume should be available
+	DevicePath string
+}
+
+// AvoidPods describes pods that should avoid this node. This is the value for a
+// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
+// will eventually become a field of NodeStatus.
+type AvoidPods struct {
+	// Bounded-sized list of signatures of pods that should avoid this node, sorted
+	// in timestamp order from oldest to newest.
Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeKubeletConfigOk indicates whether the kubelet is correctly configured + NodeKubeletConfigOk NodeConditionType = "KubeletConfigOk" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. 
+type ResourceName string
+
+// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+	// CPU, in cores. (500m = .5 cores)
+	ResourceCPU ResourceName = "cpu"
+	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceMemory ResourceName = "memory"
+	// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+	ResourceStorage ResourceName = "storage"
+	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
+	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
+	// NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
+	ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
+)
+
+const (
+	// Default namespace prefix.
+	ResourceDefaultNamespacePrefix = "kubernetes.io/"
+	// Name prefix for huge page resources (alpha).
+	ResourceHugePagesPrefix = "hugepages-"
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node is a worker node in Kubernetes.
+// The name of the node according to etcd is in ObjectMeta.Name.
+type Node struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of a node.
+	// +optional
+	Spec NodeSpec
+
+	// Status describes the current status of a Node
+	// +optional
+	Status NodeStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeList is a list of nodes.
+type NodeList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Node
+}
+
+// NamespaceSpec describes the attributes on a Namespace
+type NamespaceSpec struct {
+	// Finalizers is an opaque list of values that must be empty to permanently remove the object from storage
+	Finalizers []FinalizerName
+}
+
+// FinalizerName is the name identifying a finalizer during namespace lifecycle.
+type FinalizerName string
+
+// These are internal finalizer values to Kubernetes; they must be qualified names unless defined here or
+// in metav1.
+const (
+	FinalizerKubernetes FinalizerName = "kubernetes"
+)
+
+// NamespaceStatus is information about the current status of a Namespace.
+type NamespaceStatus struct {
+	// Phase is the current lifecycle phase of the namespace.
+	// +optional
+	Phase NamespacePhase
+}
+
+type NamespacePhase string
+
+// These are the valid phases of a namespace.
+const (
+	// NamespaceActive means the namespace is available for use in the system
+	NamespaceActive NamespacePhase = "Active"
+	// NamespaceTerminating means the namespace is undergoing graceful termination
+	NamespaceTerminating NamespacePhase = "Terminating"
+)
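+
+// An illustrative sketch (editorial note, not upstream code): a Node's
+// annotations are reachable through its embedded metav1.ObjectMeta, and its
+// capacity through the ResourceList defined above; nodeSummary is a
+// hypothetical helper name.
+//
+//	func nodeSummary(n Node) (map[string]string, resource.Quantity) {
+//		return n.Annotations, n.Status.Capacity[ResourceCPU]
+//	}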
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// A namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+type Namespace struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of the Namespace.
+	// +optional
+	Spec NamespaceSpec
+
+	// Status describes the current status of a Namespace
+	// +optional
+	Status NamespaceStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NamespaceList is a list of Namespaces.
+type NamespaceList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Namespace
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
+// Deprecated in 1.7, please use the bindings subresource of pods instead.
+type Binding struct {
+	metav1.TypeMeta
+	// ObjectMeta describes the object that is being bound.
+	// +optional
+	metav1.ObjectMeta
+
+	// Target is the object to bind to.
+	Target ObjectReference
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID
+}
+
+// DeletionPropagation decides whether and how garbage collection will be performed.
+type DeletionPropagation string
+
+const (
+	// Orphans the dependents.
+	DeletePropagationOrphan DeletionPropagation = "Orphan"
+	// Deletes the object from the key-value store; the garbage collector will delete the dependents in the background.
+	DeletePropagationBackground DeletionPropagation = "Background"
+	// The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store.
+	// The API server will put the "DeletingDependents" finalizer on the object and set its deletionTimestamp.
+	// This policy is cascading, i.e., the dependents will be deleted with Foreground.
+	DeletePropagationForeground DeletionPropagation = "Foreground"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeleteOptions may be provided when deleting an API object
+// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
+type DeleteOptions struct {
+	metav1.TypeMeta
+
+	// Optional duration in seconds before the object should be deleted. The value must be a non-negative integer.
+	// The value zero indicates delete immediately. If this value is nil, the default grace period for the
+	// specified type will be used.
+	// +optional
+	GracePeriodSeconds *int64
+
+	// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+	// returned.
+	// +optional
+	Preconditions *Preconditions
+
+	// Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7.
+	// Should the dependent objects be orphaned. If true/false, the "orphan"
+	// finalizer will be added to/removed from the object's finalizers list.
+	// Either this field or PropagationPolicy may be set, but not both.
+	// +optional
+	OrphanDependents *bool
+
+	// Whether and how garbage collection will be performed.
+	// Either this field or OrphanDependents may be set, but not both.
+	// The default policy is decided by the existing finalizer set in
+	// metadata.finalizers and the resource-specific default policy.
+ // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + // +optional + PropagationPolicy *DeletionPropagation +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call, and has future support for +// watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + + // If true, partially initialized resources are included in the response. + IncludeUninitialized bool + + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. 
+ LimitBytes *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. 
For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. Mapped to events.Event.regarding + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. Mapped to events.Event.note + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string + + // ID of the controller instance, e.g. 
`kubelet-xyzf`. + // +optional + ReportingInstance string +} + +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List metainternalversion.List + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. 
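+//
+// As an editorial illustration (not upstream code), a single LimitRangeItem as
+// defined above could constrain containers like this; the quantities are
+// examples only:
+//
+//	item := LimitRangeItem{
+//		Type: LimitTypeContainer,
+//		Max:  ResourceList{ResourceCPU: resource.MustParse("2")},
+//		Min:  ResourceList{ResourceMemory: resource.MustParse("64Mi")},
+//	}
+//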
+type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" + // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" +) + +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" + // Default resource requests prefix + DefaultResourceRequestsPrefix = "requests." +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. 
+ // +optional + Scopes []ResourceQuotaScope +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that 
follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authentication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. + // +optional + Data map[string]string + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. 
+ // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + BinaryData map[string][]byte +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. 
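+	// For example (editorial illustration, not upstream text; the values are
+	// examples only):
+	//
+	//	Capabilities{
+	//		Add:  []Capability{"NET_BIND_SERVICE"},
+	//		Drop: []Capability{"ALL"},
+	//	}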
+	// +optional
+	Capabilities *Capabilities
+	// Run container in privileged mode.
+	// Processes in privileged containers are essentially equivalent to root on the host.
+	// Defaults to false.
+	// +optional
+	Privileged *bool
+	// The SELinux context to be applied to the container.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	SELinuxOptions *SELinuxOptions
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsUser *int64
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsGroup *int64
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool
+	// The read-only root filesystem allows you to restrict the locations that an application can write
+	// files to, ensuring that persistent data can only be written to mounts.
+	// +optional
+	ReadOnlyRootFilesystem *bool
+	// AllowPrivilegeEscalation controls whether a process can gain more
+	// privileges than its parent process. This bool directly controls if
+	// the no_new_privs flag will be set on the container process.
+	// +optional
+	AllowPrivilegeEscalation *bool
+}
+
+// SELinuxOptions are the labels to be applied to the container.
+type SELinuxOptions struct {
+	// SELinux user label
+	// +optional
+	User string
+	// SELinux role label
+	// +optional
+	Role string
+	// SELinux type label
+	// +optional
+	Type string
+	// SELinux level label.
+	// +optional
+	Level string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record
+// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range
+// should be a string representation of the inputs to a range (for instance, for IP allocation it
+// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a
+// binary range. Consumers should use annotations to record additional information (schema version,
+// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation
+// of the cluster, thus the object is less strongly typed than most.
+type RangeAllocation struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+	// A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or
+	// port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define
+	// a start and end unless there is an implicit end.
+	Range string
+	// A byte array representing the serialized state of a range allocation. Additional clarifiers on
+	// the type or format of data should be represented with annotations. For IP allocations, this is
+	// represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing
+	// a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4).
+	Data []byte
+}
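+
+// An illustrative sketch (editorial note, not upstream code): for an IP
+// allocation, the bit array described above maps bit i to base+i, so the bit
+// for a given address can be recovered with a hypothetical helper (net and
+// encoding/binary assumed imported):
+//
+//	func bitOffset(base, ip net.IP) int {
+//		return int(binary.BigEndian.Uint32(ip.To4()) - binary.BigEndian.Uint32(base.To4()))
+//	}
+//
+// For base 10.0.0.0, address 10.0.0.4 yields offset 4, i.e. the fifth bit,
+// matching the example above.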
+
+const (
+	// "default-scheduler" is the name of the default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+
+	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+	// corresponding to every RequiredDuringScheduling affinity rule.
+	// When the --hard-pod-affinity-weight scheduler flag is not specified,
+	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+	DefaultHardPodAffinitySymmetricWeight int32 = 1
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
new file mode 100644
index 00000000..35225755
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
@@ -0,0 +1,5970 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package core
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSElasticBlockStoreVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
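+//
+// Editorial aside (not generated text): every pair below follows the same
+// pattern. DeepCopyInto copies the receiver into caller-supplied memory,
+// DeepCopy is an allocate-and-copy convenience wrapper, and DeepCopyObject
+// satisfies runtime.Object for scheme-registered types. For example:
+//
+//	dst := new(Affinity)
+//	src.DeepCopyInto(dst) // deep copy into an existing value
+//	cp := src.DeepCopy()  // allocate a new Affinity and copy into it
+//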
+func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + if *in == nil { + *out = nil + } else { + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + if *in == nil { + *out = nil + } else { + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + if *in == nil { + *out = nil + } else { + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + if *in == nil { + *out = nil + } else { + *out = new(AzureDataDiskCachingMode) + **out = **in + } + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + if *in == nil { + *out = nil + } else { + *out = new(AzureDataDiskKind) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. +func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. +func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControllerPublishSecretRef != nil { + in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.NodeStageSecretRef != nil { + in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. +func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. +func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { + if in == nil { + return nil + } + out := new(CinderVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. +func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { + if in == nil { + return nil + } + out := new(ClientIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. +func (in *ComponentCondition) DeepCopy() *ComponentCondition { + if in == nil { + return nil + } + out := new(ComponentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. +func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { + if in == nil { + return nil + } + out := new(ComponentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.BinaryData != nil { + in, out := &in.BinaryData, &out.BinaryData + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]byte, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. +func (in *ConfigMap) DeepCopy() *ConfigMap { + if in == nil { + return nil + } + out := new(ConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. +func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. +func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { + if in == nil { + return nil + } + out := new(ConfigMapKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. +func (in *ConfigMapList) DeepCopy() *ConfigMapList { + if in == nil { + return nil + } + out := new(ConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. +func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { + if in == nil { + return nil + } + out := new(ConfigMapProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
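// Aside (not part of the vendored file): a usage sketch, assuming this
// vendored package is imported as core ("k8s.io/kubernetes/pkg/apis/core").
// DeepCopy returns a fully independent object, so mutating reference fields
// of the copy cannot leak into the original:
//
//	orig := &core.ConfigMap{Data: map[string]string{"key": "old"}}
//	cp := orig.DeepCopy()
//	cp.Data["key"] = "new"        // only the copy changes
//	fmt.Println(orig.Data["key"]) // still prints "old"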
+func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. +func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Container) DeepCopyInto(out *Container) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + if *in == nil { + *out = nil + } else { + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + if *in == nil { + *out = nil + } else { + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + if *in == nil { + *out = nil + } else { + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + if *in == nil { + *out = nil + } else { + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. +func (in *ContainerImage) DeepCopy() *ContainerImage { + if in == nil { + return nil + } + out := new(ContainerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. +func (in *ContainerPort) DeepCopy() *ContainerPort { + if in == nil { + return nil + } + out := new(ContainerPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerState) DeepCopyInto(out *ContainerState) { + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateWaiting) + **out = **in + } + } + if in.Running != nil { + in, out := &in.Running, &out.Running + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. +func (in *ContainerState) DeepCopy() *ContainerState { + if in == nil { + return nil + } + out := new(ContainerState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. +func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { + if in == nil { + return nil + } + out := new(ContainerStateRunning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. +func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { + if in == nil { + return nil + } + out := new(ContainerStateTerminated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. 
+func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { + if in == nil { + return nil + } + out := new(ContainerStateWaiting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. +func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { + if in == nil { + return nil + } + out := new(DaemonEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if *in == nil { + *out = nil + } else { + *out = new(Preconditions) + (*in).DeepCopyInto(*out) + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + if *in == nil { + *out = nil + } else { + *out = new(DeletionPropagation) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. +func (in *DeleteOptions) DeepCopy() *DeleteOptions { + if in == nil { + return nil + } + out := new(DeleteOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. +func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { + if in == nil { + return nil + } + out := new(DownwardAPIProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectFieldSelector) + **out = **in + } + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + if *in == nil { + *out = nil + } else { + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. +func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. +func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { + *out = *in + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + if *in == nil { + *out = nil + } else { + x := (*in).DeepCopy() + *out = &x + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. +func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { + if in == nil { + return nil + } + out := new(EmptyDirVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. +func (in *EndpointAddress) DeepCopy() *EndpointAddress { + if in == nil { + return nil + } + out := new(EndpointAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. 
+func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. +func (in *EndpointSubset) DeepCopy() *EndpointSubset { + if in == nil { + return nil + } + out := new(EndpointSubset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. +func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. +func (in *EndpointsList) DeepCopy() *EndpointsList { + if in == nil { + return nil + } + out := new(EndpointsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. +func (in *EnvFromSource) DeepCopy() *EnvFromSource { + if in == nil { + return nil + } + out := new(EnvFromSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + if *in == nil { + *out = nil + } else { + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectFieldSelector) + **out = **in + } + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + if *in == nil { + *out = nil + } else { + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + if *in == nil { + *out = nil + } else { + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. +func (in *EnvVarSource) DeepCopy() *EnvVarSource { + if in == nil { + return nil + } + out := new(EnvVarSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InvolvedObject = in.InvolvedObject + out.Source = in.Source + in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) + in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. 
+func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSource) DeepCopyInto(out *EventSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. +func (in *EventSource) DeepCopy() *EventSource { + if in == nil { + return nil + } + out := new(EventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. +func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.WWIDs != nil { + in, out := &in.WWIDs, &out.WWIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. 
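// Aside (not part of the vendored file): DeepCopyObject, generated only for
// top-level API types such as Event and EventList above, is what lets those
// types satisfy runtime.Object (imported at the top of this file), so generic
// machinery can clone an API object without knowing its concrete type:
//
//	func clone(obj runtime.Object) runtime.Object {
//		return obj.DeepCopyObject() // *Event in, *Event out
//	}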
+func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { + if in == nil { + return nil + } + out := new(FCVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. +func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { + if in == nil { + return nil + } + out := new(FlexVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. +func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { + if in == nil { + return nil + } + out := new(FlockerVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. +func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(GCEPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. +func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { + if in == nil { + return nil + } + out := new(GitRepoVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. +func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { + *out = *in + out.Port = in.Port + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. +func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { + if in == nil { + return nil + } + out := new(HTTPGetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + if *in == nil { + *out = nil + } else { + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + if *in == nil { + *out = nil + } else { + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + if *in == nil { + *out = nil + } else { + *out = new(TCPSocketAction) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + if *in == nil { + *out = nil + } else { + *out = new(HostPathType) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. 
+func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { + if in == nil { + return nil + } + out := new(HostPathVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. +func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. +func (in *KeyToPath) DeepCopy() *KeyToPath { + if in == nil { + return nil + } + out := new(KeyToPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + if *in == nil { + *out = nil + } else { + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + if *in == nil { + *out = nil + } else { + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. 
+func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRange) DeepCopyInto(out *LimitRange) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. +func (in *LimitRange) DeepCopy() *LimitRange { + if in == nil { + return nil + } + out := new(LimitRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRange) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. +func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { + if in == nil { + return nil + } + out := new(LimitRangeItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. +func (in *LimitRangeList) DeepCopy() *LimitRangeList { + if in == nil { + return nil + } + out := new(LimitRangeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRangeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. +func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { + if in == nil { + return nil + } + out := new(LimitRangeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil + } else { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LabelSelector == nil { + out.LabelSelector = nil + } else { + out.LabelSelector = in.LabelSelector.DeepCopySelector() + } + if in.FieldSelector == nil { + out.FieldSelector = nil + } else { + out.FieldSelector = in.FieldSelector.DeepCopySelector() + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. +func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { + if in == nil { + return nil + } + out := new(LoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. +func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { + if in == nil { + return nil + } + out := new(LoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. +func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { + if in == nil { + return nil + } + out := new(LocalVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. +func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { + if in == nil { + return nil + } + out := new(NFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Namespace) DeepCopyInto(out *Namespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. +func (in *Namespace) DeepCopy() *Namespace { + if in == nil { + return nil + } + out := new(Namespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Namespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. 
+func (in *NamespaceList) DeepCopy() *NamespaceList { + if in == nil { + return nil + } + out := new(NamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. +func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { + if in == nil { + return nil + } + out := new(NamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. +func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { + if in == nil { + return nil + } + out := new(NamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. +func (in *NodeAddress) DeepCopy() *NodeAddress { + if in == nil { + return nil + } + out := new(NodeAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + if *in == nil { + *out = nil + } else { + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. +func (in *NodeAffinity) DeepCopy() *NodeAffinity { + if in == nil { + return nil + } + out := new(NodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. +func (in *NodeCondition) DeepCopy() *NodeCondition { + if in == nil { + return nil + } + out := new(NodeCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. +func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { + if in == nil { + return nil + } + out := new(NodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeConfigSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { + *out = *in + out.KubeletEndpoint = in.KubeletEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. +func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { + if in == nil { + return nil + } + out := new(NodeDaemonEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. 
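NodeAffinity above shows the other pointer pattern: when the pointee itself owns slices or maps (NodeSelector), the generator allocates a new value and recurses via DeepCopyInto, rather than the flat **out = **in it uses for pointer-free pointees such as the ObjectReference in NodeConfigSource. A minimal sketch of the recursive case (Selector and Affinity are illustrative names):

package main

import "fmt"

type Selector struct {
	Terms []string
}

func (in *Selector) DeepCopyInto(out *Selector) {
	*out = *in
	if in.Terms != nil {
		out.Terms = make([]string, len(in.Terms))
		copy(out.Terms, in.Terms)
	}
}

// Affinity mimics NodeAffinity's optional pointer to a type that itself
// needs a recursive deep copy.
type Affinity struct {
	Required *Selector
}

func (in *Affinity) DeepCopyInto(out *Affinity) {
	*out = *in
	if in.Required != nil {
		out.Required = new(Selector)
		in.Required.DeepCopyInto(out.Required) // recurse, don't alias
	}
}

func main() {
	a := Affinity{Required: &Selector{Terms: []string{"ssd"}}}
	var b Affinity
	a.DeepCopyInto(&b)
	b.Required.Terms[0] = "hdd"
	fmt.Println(a.Required.Terms[0]) // ssd
}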
+func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions. +func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions { + if in == nil { + return nil + } + out := new(NodeProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResources) DeepCopyInto(out *NodeResources) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. +func (in *NodeResources) DeepCopy() *NodeResources { + if in == nil { + return nil + } + out := new(NodeResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. +func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { + if in == nil { + return nil + } + out := new(NodeSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. +func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { + if in == nil { + return nil + } + out := new(NodeSelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigSource != nil { + in, out := &in.ConfigSource, &out.ConfigSource + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + out.DaemonEndpoints = in.DaemonEndpoints + out.NodeInfo = in.NodeInfo + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo. 
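NodeStatus above copies its ResourceList-typed Capacity and Allocatable maps key by key, calling DeepCopy on each value, because assigning a map only copies the map header, not its entries. A standalone sketch of the same loop (Quantity, Resources and copyResources are stand-ins, not the real resource.Quantity API):

package main

import "fmt"

// Quantity stands in for resource.Quantity; assume DeepCopy returns a
// detached value.
type Quantity struct{ Val int64 }

func (q Quantity) DeepCopy() Quantity { return Quantity{Val: q.Val} }

type Resources map[string]Quantity

// copyResources mimics the generated loop for map fields: a fresh map
// is allocated and every value is copied key by key.
func copyResources(in Resources) Resources {
	if in == nil {
		return nil
	}
	out := make(Resources, len(in))
	for key, val := range in {
		out[key] = val.DeepCopy()
	}
	return out
}

func main() {
	a := Resources{"cpu": {Val: 4}}
	b := copyResources(a)
	b["cpu"] = Quantity{Val: 8}
	fmt.Println(a["cpu"].Val, b["cpu"].Val) // 4 8
}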
+func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo { + if in == nil { + return nil + } + out := new(NodeSystemInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector. +func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector { + if in == nil { + return nil + } + out := new(ObjectFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]v1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if *in == nil { + *out = nil + } else { + *out = new(v1.Initializers) + (*in).DeepCopyInto(*out) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
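ObjectMeta is the copy that matters most when reading labels or annotations off a shared object: a plain struct assignment would alias the Labels and Annotations maps, so the generated code allocates new maps and copies every entry. A minimal sketch of the aliasing problem it prevents (the Meta type is a toy stand-in):

package main

import "fmt"

// Meta mimics the map-copy part of ObjectMeta.DeepCopyInto. A plain
// struct assignment shares the Annotations map header; the deep copy
// allocates a new map so writes cannot leak between copies.
type Meta struct {
	Annotations map[string]string
}

func (in *Meta) DeepCopyInto(out *Meta) {
	*out = *in
	if in.Annotations != nil {
		out.Annotations = make(map[string]string, len(in.Annotations))
		for k, v := range in.Annotations {
			out.Annotations[k] = v
		}
	}
}

func main() {
	a := Meta{Annotations: map[string]string{"owner": "team-a"}}
	var deep Meta
	a.DeepCopyInto(&deep)
	shallow := a // struct copy, but the map is still shared
	shallow.Annotations["owner"] = "team-b"
	fmt.Println(a.Annotations["owner"], deep.Annotations["owner"]) // team-b team-a
}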
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume. +func (in *PersistentVolume) DeepCopy() *PersistentVolume { + if in == nil { + return nil + } + out := new(PersistentVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. +func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { + if in == nil { + return nil + } + out := new(PersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. +func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. +func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. +func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PersistentVolumeClaimCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. +func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource. +func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList. 
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList { + if in == nil { + return nil + } + out := new(PersistentVolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + if *in == nil { + *out = nil + } else { + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + if *in == nil { + *out = nil + } else { + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + if *in == nil { + *out = nil + } else { + *out = new(GlusterfsVolumeSource) + **out = **in + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + if *in == nil { + *out = nil + } else { + *out = new(NFSVolumeSource) + **out = **in + } + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + if *in == nil { + *out = nil + } else { + *out = new(RBDPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + if *in == nil { + *out = nil + } else { + *out = new(QuobyteVolumeSource) + **out = **in + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + if *in == nil { + *out = nil + } else { + *out = new(ISCSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + if *in == nil { + *out = nil + } else { + *out = new(FlexPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + if *in == nil { + *out = nil + } else { + *out = new(CinderVolumeSource) + **out = **in + } + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + if *in == nil { + *out = nil + } else { + *out = new(CephFSPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + if *in == nil { + *out = nil + } else { + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + if *in == nil { + *out = nil + } else { + *out = new(FlockerVolumeSource) + **out = **in + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + if *in == nil { + *out = nil + } else { + *out = new(AzureFilePersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + if *in == nil { + *out = nil + } else { + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + if *in == nil { + *out = nil + } else { + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + }
+ if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + if *in == nil { + *out = nil + } else { + *out = new(PortworxVolumeSource) + **out = **in + } + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + if *in == nil { + *out = nil + } else { + *out = new(ScaleIOPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Local != nil { + in, out := &in.Local, &out.Local + if *in == nil { + *out = nil + } else { + *out = new(LocalVolumeSource) + **out = **in + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + if *in == nil { + *out = nil + } else { + *out = new(StorageOSPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource. +func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource) + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + if *in == nil { + *out = nil + } else { + *out = new(VolumeNodeAffinity) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec. +func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus. +func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus { + if in == nil { + return nil + } + out := new(PersistentVolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
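PersistentVolumeSource above is effectively a union: at most one of its many pointer members is set, and the generated copy walks every member but allocates only the branches that are non-nil. A reduced sketch of that shape (NFS, HostPath and Source here are toy types, not the vendored ones):

package main

import "fmt"

// Source mimics the "one of many pointers" layout of
// PersistentVolumeSource: only non-nil branches are allocated.
type NFS struct{ Server, Path string }
type HostPath struct{ Path string }

type Source struct {
	NFS      *NFS
	HostPath *HostPath
}

func (in *Source) DeepCopyInto(out *Source) {
	*out = *in
	if in.NFS != nil {
		out.NFS = new(NFS)
		*out.NFS = *in.NFS // pointer-free payload: value copy suffices
	}
	if in.HostPath != nil {
		out.HostPath = new(HostPath)
		*out.HostPath = *in.HostPath
	}
}

func main() {
	a := Source{NFS: &NFS{Server: "fs1", Path: "/exports"}}
	var b Source
	a.DeepCopyInto(&b)
	fmt.Println(b.NFS.Server, b.HostPath == nil) // fs1 true
}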
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource. +func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(PhotonPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pod) DeepCopyInto(out *Pod) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. +func (in *Pod) DeepCopy() *Pod { + if in == nil { + return nil + } + out := new(Pod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pod) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. +func (in *PodAffinity) DeepCopy() *PodAffinity { + if in == nil { + return nil + } + out := new(PodAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. +func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { + if in == nil { + return nil + } + out := new(PodAffinityTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. +func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { + if in == nil { + return nil + } + out := new(PodAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. +func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { + if in == nil { + return nil + } + out := new(PodAttachOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodAttachOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCondition) DeepCopyInto(out *PodCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. +func (in *PodCondition) DeepCopy() *PodCondition { + if in == nil { + return nil + } + out := new(PodCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. +func (in *PodExecOptions) DeepCopy() *PodExecOptions { + if in == nil { + return nil + } + out := new(PodExecOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodExecOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodList) DeepCopyInto(out *PodList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. +func (in *PodList) DeepCopy() *PodList { + if in == nil { + return nil + } + out := new(PodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. +func (in *PodLogOptions) DeepCopy() *PodLogOptions { + if in == nil { + return nil + } + out := new(PodLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. +func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { + if in == nil { + return nil + } + out := new(PodPortForwardOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. +func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { + if in == nil { + return nil + } + out := new(PodProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { + *out = *in + if in.ShareProcessNamespace != nil { + in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(SELinuxOptions) + **out = **in + } + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. +func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { + if in == nil { + return nil + } + out := new(PodSecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
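The DeepCopyObject wrappers above exist so that any API type can be copied behind the runtime.Object interface without the caller knowing its concrete type. A standalone sketch of the idea using a toy Object interface (not the real apimachinery one) and an illustrative Pod type:

package main

import "fmt"

// Object stands in for runtime.Object: anything that can produce a
// detached copy of itself behind the interface.
type Object interface {
	DeepCopyObject() Object
}

type Pod struct{ Name string }

func (in *Pod) DeepCopy() *Pod {
	if in == nil {
		return nil
	}
	out := *in
	return &out
}

// DeepCopyObject wraps DeepCopy so *Pod satisfies Object, mirroring
// the generated wrappers above.
func (in *Pod) DeepCopyObject() Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

func main() {
	var obj Object = &Pod{Name: "web-0"}
	cp := obj.DeepCopyObject().(*Pod)
	cp.Name = "web-1"
	fmt.Println(obj.(*Pod).Name, cp.Name) // web-0 web-1
}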
+func (in *PodSignature) DeepCopyInto(out *PodSignature) { + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if *in == nil { + *out = nil + } else { + *out = new(v1.OwnerReference) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. +func (in *PodSignature) DeepCopy() *PodSignature { + if in == nil { + return nil + } + out := new(PodSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpec) DeepCopyInto(out *PodSpec) { + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + if *in == nil { + *out = nil + } else { + *out = new(PodSecurityContext) + (*in).DeepCopyInto(*out) + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + if *in == nil { + *out = nil + } else { + *out = new(Affinity) + (*in).DeepCopyInto(*out) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. 
+func (in *PodSpec) DeepCopy() *PodSpec { + if in == nil { + return nil + } + out := new(PodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatus) DeepCopyInto(out *PodStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. +func (in *PodStatus) DeepCopy() *PodStatus { + if in == nil { + return nil + } + out := new(PodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. +func (in *PodStatusResult) DeepCopy() *PodStatusResult { + if in == nil { + return nil + } + out := new(PodStatusResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodStatusResult) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. +func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. 
+func (in *PodTemplateList) DeepCopy() *PodTemplateList { + if in == nil { + return nil + } + out := new(PodTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. +func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { + if in == nil { + return nil + } + out := new(PodTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. +func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { + if in == nil { + return nil + } + out := new(PortworxVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + if *in == nil { + *out = nil + } else { + *out = new(types.UID) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { + return nil + } + out := new(Preconditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { + *out = *in + in.PodSignature.DeepCopyInto(&out.PodSignature) + in.EvictionTime.DeepCopyInto(&out.EvictionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. +func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { + if in == nil { + return nil + } + out := new(PreferAvoidPodsEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { + *out = *in + in.Preference.DeepCopyInto(&out.Preference) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. +func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { + if in == nil { + return nil + } + out := new(PreferredSchedulingTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + in.Handler.DeepCopyInto(&out.Handler) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. +func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { + if in == nil { + return nil + } + out := new(ProjectedVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. +func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { + if in == nil { + return nil + } + out := new(QuobyteVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. +func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { + if in == nil { + return nil + } + out := new(RBDPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. +func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { + if in == nil { + return nil + } + out := new(RBDVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. +func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. +func (in *ReplicationController) DeepCopy() *ReplicationController { + if in == nil { + return nil + } + out := new(ReplicationController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. +func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { + if in == nil { + return nil + } + out := new(ReplicationControllerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. +func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { + if in == nil { + return nil + } + out := new(ReplicationControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + if *in == nil { + *out = nil + } else { + *out = new(PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. +func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { + if in == nil { + return nil + } + out := new(ReplicationControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. +func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { + if in == nil { + return nil + } + out := new(ReplicationControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. +func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { + if in == nil { + return nil + } + out := new(ResourceFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceList) DeepCopyInto(out *ResourceList) { + { + in := &in + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in ResourceList) DeepCopy() ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. +func (in *ResourceQuota) DeepCopy() *ResourceQuota { + if in == nil { + return nil + } + out := new(ResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
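ResourceList is a named map type, so unlike the struct types around it the generator gives it value receivers and a DeepCopy that returns a new map directly rather than a pointer; each Quantity value is copied through its own DeepCopy because Quantity carries internal cached state. A hedged usage sketch, assuming the public v1 analogue of the type shown here and the vendored apimachinery resource package:

    package main

    import (
    	"fmt"

    	"k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
    	limits := v1.ResourceList{
    		v1.ResourceCPU:    resource.MustParse("500m"),
    		v1.ResourceMemory: resource.MustParse("128Mi"),
    	}
    	// Value receiver: called on the map itself, returns a new map.
    	copied := limits.DeepCopy()
    	copied[v1.ResourceCPU] = resource.MustParse("2")

    	cpuOrig := limits[v1.ResourceCPU] // locals so String's pointer receiver applies
    	cpuCopy := copied[v1.ResourceCPU]
    	fmt.Println(cpuOrig.String(), cpuCopy.String()) // 500m 2
    }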
+func (in *ResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. +func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { + if in == nil { + return nil + } + out := new(ResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. +func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. +func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. 
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. +func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { + if in == nil { + return nil + } + out := new(SELinuxOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. +func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]byte, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. +func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. 
+func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + if *in == nil { + *out = nil + } else { + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(SELinuxOptions) + **out = **in + } + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
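Each top-level API type also gets DeepCopyObject, and that method is what lets the type satisfy runtime.Object: generic machinery (the scheme, informers, controllers) can clone objects without knowing their concrete types. A minimal sketch, assuming the vendored k8s.io/api and apimachinery packages:

    package main

    import (
    	"fmt"

    	"k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    // cloneAny works for every type that implements runtime.Object,
    // i.e. anything with a generated DeepCopyObject method.
    func cloneAny(obj runtime.Object) runtime.Object {
    	return obj.DeepCopyObject()
    }

    func main() {
    	svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "web"}}
    	clone := cloneAny(svc).(*v1.Service) // concrete type is preserved
    	clone.Name = "web-copy"
    	fmt.Println(svc.Name, clone.Name) // web web-copy
    }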
+func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. +func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + if *in == nil { + *out = nil + } else { + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + if *in == nil { + *out = nil + } else { + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. +func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + if *in == nil { + *out = nil + } else { + *out = new(MountPropagationMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + if *in == nil { + *out = nil + } else { + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. +func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { + if in == nil { + return nil + } + out := new(VolumeNodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + if *in == nil { + *out = nil + } else { + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + if *in == nil { + *out = nil + } else { + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + if *in == nil { + *out = nil + } else { + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + if *in == nil { + *out = nil + } else { + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + if *in == nil { + *out = nil + } else { + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + if *in == nil { + *out = nil + } else { + *out = new(GitRepoVolumeSource) + **out = **in + } + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + if *in == nil { + *out = nil + } else { + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + if *in == nil { + *out = nil + } else { + *out = new(NFSVolumeSource) + **out = **in + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + if *in == nil { + *out = nil + } else { + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + if *in == nil { + *out = nil + } else { + *out = new(GlusterfsVolumeSource) + **out = **in + } + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + if *in == nil { + *out = nil + } else { + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + if *in == nil { + *out = nil + } else { + *out = new(QuobyteVolumeSource) + **out = **in + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + if *in == nil { + *out = nil + } else { + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + if *in == nil { + *out = nil + } else { + *out = new(CinderVolumeSource) + **out 
= **in + } + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + if *in == nil { + *out = nil + } else { + *out = new(CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + if *in == nil { + *out = nil + } else { + *out = new(FlockerVolumeSource) + **out = **in + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + if *in == nil { + *out = nil + } else { + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + if *in == nil { + *out = nil + } else { + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + if *in == nil { + *out = nil + } else { + *out = new(AzureFileVolumeSource) + **out = **in + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + if *in == nil { + *out = nil + } else { + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + if *in == nil { + *out = nil + } else { + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + if *in == nil { + *out = nil + } else { + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + if *in == nil { + *out = nil + } else { + *out = new(PortworxVolumeSource) + **out = **in + } + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + if *in == nil { + *out = nil + } else { + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + if *in == nil { + *out = nil + } else { + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. +func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { + if in == nil { + return nil + } + out := new(VsphereVirtualDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
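VolumeSource's copy above mixes two strategies, and the choice is deliberate: **out = **in is emitted for members whose structs contain only scalar fields (NFSVolumeSource, QuobyteVolumeSource, FlockerVolumeSource, ...), while members that own slices, maps, or pointers are copied recursively via their own DeepCopyInto. A toy illustration of why the distinction matters, with hypothetical types standing in for the vendored ones:

    package main

    import "fmt"

    // flat mirrors e.g. NFSVolumeSource: scalar fields only, so a plain
    // value copy (**out = **in in the generated code) is already deep.
    type flat struct{ Server, Path string }

    // nested mirrors e.g. SecretVolumeSource: it owns a slice, so a value
    // copy would leave both structs sharing the same backing array.
    type nested struct{ Items []string }

    func (in *nested) deepCopyInto(out *nested) {
    	*out = *in
    	if in.Items != nil {
    		out.Items = make([]string, len(in.Items))
    		copy(out.Items, in.Items)
    	}
    }

    func main() {
    	a := nested{Items: []string{"key"}}
    	var b nested
    	a.deepCopyInto(&b)
    	b.Items[0] = "other"
    	fmt.Println(a.Items[0], b.Items[0]) // key other — no shared state
    }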
+func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { + *out = *in + in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. +func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { + if in == nil { + return nil + } + out := new(WeightedPodAffinityTerm) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/features/BUILD b/vendor/k8s.io/kubernetes/pkg/features/BUILD new file mode 100644 index 00000000..c88d15f2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/features/BUILD @@ -0,0 +1,30 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["kube_features.go"], + importpath = "k8s.io/kubernetes/pkg/features", + deps = [ + "//vendor/k8s.io/apiextensions-apiserver/pkg/features:go_default_library", + "//vendor/k8s.io/apiserver/pkg/features:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go new file mode 100644 index 00000000..dd8f24c2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -0,0 +1,341 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.X + // MyFeature utilfeature.Feature = "MyFeature" + + // owner: @tallclair + // beta: v1.4 + AppArmor utilfeature.Feature = "AppArmor" + + // owner: @mtaufen + // alpha: v1.4 + DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig" + + // owner: @pweil- + // alpha: v1.5 + // + // Default userns=host for containers that are using other host namespaces, host mounts, the pod + // contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE, + // SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon. + ExperimentalHostUserNamespaceDefaultingGate utilfeature.Feature = "ExperimentalHostUserNamespaceDefaulting" + + // owner: @vishh + // alpha: v1.5 + // + // Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod` + // and also prevents them from being evicted from a node. 
+ // Note: This feature is not supported for `BestEffort` pods. + ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation" + + // owner: @vishh + // alpha: v1.6 + // + // This is deprecated and will be removed in v1.11. Use DevicePlugins instead. + // + // Enables support for GPUs as a schedulable resource. + // Only Nvidia GPUs are supported as of v1.6. + // Works only with Docker Container Runtime. + Accelerators utilfeature.Feature = "Accelerators" + + // owner: @jiayingz + // beta: v1.10 + // + // Enables support for Device Plugins + DevicePlugins utilfeature.Feature = "DevicePlugins" + + // owner: @gmarek + // alpha: v1.6 + // + // Changes the logic behind evicting Pods from not ready Nodes + // to take advantage of NoExecute Taints and Tolerations. + TaintBasedEvictions utilfeature.Feature = "TaintBasedEvictions" + + // owner: @jcbsmpsn + // alpha: v1.7 + // + // Gets a server certificate for the kubelet from the Certificate Signing + // Request API instead of generating one self signed and auto rotates the + // certificate as expiration approaches. + RotateKubeletServerCertificate utilfeature.Feature = "RotateKubeletServerCertificate" + + // owner: @jcbsmpsn + // alpha: v1.7 + // + // Automatically renews the client certificate used for communicating with + // the API server as the certificate approaches expiration. + RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate" + + // owner: @msau42 + // alpha: v1.7 + // + // A new volume type that supports local disks on a node. + PersistentLocalVolumes utilfeature.Feature = "PersistentLocalVolumes" + + // owner: @jinxu + // beta: v1.10 + // + // New local storage types to support local storage capacity isolation + LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation" + + // owner: @gnufied + // alpha: v1.8 + // Ability to Expand persistent volumes + ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes" + + // owner: @verb + // alpha: v1.10 + // + // Allows running a "debug container" in a pod namespaces to troubleshoot a running pod. + DebugContainers utilfeature.Feature = "DebugContainers" + + // owner: @verb + // alpha: v1.10 + // + // Allows all containers in a pod to share a process namespace. + PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace" + + // owner: @bsalamat + // alpha: v1.8 + // + // Add priority to pods. Priority affects scheduling and preemption of pods. + PodPriority utilfeature.Feature = "PodPriority" + + // owner: @resouer + // alpha: v1.8 + // + // Enable equivalence class cache for scheduler. + EnableEquivalenceClassCache utilfeature.Feature = "EnableEquivalenceClassCache" + + // owner: @k82cn + // alpha: v1.8 + // + // Taint nodes based on their condition status for 'NetworkUnavailable', + // 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'. + TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition" + + // owner: @jsafrane + // beta: v1.10 + // + // Enable mount propagation of volumes. + MountPropagation utilfeature.Feature = "MountPropagation" + + // owner: @ConnorDoyle + // alpha: v1.8 + // + // Alternative container-level CPU affinity policies. 
+ CPUManager utilfeature.Feature = "CPUManager" + + // owner: @derekwaynecarr + // beta: v1.10 + // + // Enable pods to consume pre-allocated huge pages of varying page sizes + HugePages utilfeature.Feature = "HugePages" + + // owner @brendandburns + // alpha: v1.9 + // + // Enable nodes to exclude themselves from service load balancers + ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" + + // owner @brendandburns + // deprecated: v1.10 + // + // Enable the service proxy to contact external IP addresses. Note this feature is present + // only for backward compatibility, it will be removed in the 1.11 release. + ServiceProxyAllowExternalIPs utilfeature.Feature = "ServiceProxyAllowExternalIPs" + + // owner: @jsafrane + // alpha: v1.9 + // + // Enable running mount utilities in containers. + MountContainers utilfeature.Feature = "MountContainers" + + // owner: @msau42 + // alpha: v1.9 + // + // Extend the default scheduler to be aware of PV topology and handle PV binding + // Before moving to beta, resolve Kubernetes issue #56180 + VolumeScheduling utilfeature.Feature = "VolumeScheduling" + + // owner: @vladimirvivien + // alpha: v1.9 + // + // Enable mount/attachment of Container Storage Interface (CSI) backed PVs + CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" + + // owner @MrHohn + // beta: v1.10 + // + // Support configurable pod DNS parameters. + CustomPodDNS utilfeature.Feature = "CustomPodDNS" + + // owner: @screeley44 + // alpha: v1.9 + // + // Enable Block volume support in containers. + BlockVolume utilfeature.Feature = "BlockVolume" + + // owner: @pospispa + // beta: v1.10 + // + // Postpone deletion of a PV or a PVC when they are being used + StorageObjectInUseProtection utilfeature.Feature = "StorageObjectInUseProtection" + + // owner: @aveshagarwal + // alpha: v1.9 + // + // Enable resource limits priority function + ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction" + + // owner: @m1093782566 + // beta: v1.9 + // + // Implement IPVS-based in-cluster service load balancing + SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" + + // owner: @dims + // alpha: v1.10 + // + // Implement support for limiting pids in pods + SupportPodPidsLimit utilfeature.Feature = "SupportPodPidsLimit" + + // owner: @feiskyer + // alpha: v1.10 + // + // Enable Hyper-V containers on Windows + HyperVContainer utilfeature.Feature = "HyperVContainer" + + // owner: @joelsmith + // deprecated: v1.10 + // + // Mount secret, configMap, downwardAPI and projected volumes ReadOnly. Note: this feature + // gate is present only for backward compatibility, it will be removed in the 1.11 release. + ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes" + + // owner: @k82cn + // alpha: v1.10 + // + // Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller + ScheduleDaemonSetPods utilfeature.Feature = "ScheduleDaemonSetPods" + + // owner: @mikedanese + // alpha: v1.10 + // + // Implement TokenRequest endpoint on service account resources. + TokenRequest utilfeature.Feature = "TokenRequest" + + // owner: @Random-Liu + // alpha: v1.10 + // + // Enable container log rotation for cri container runtime + CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation" + + // owner: @verult + // beta: v1.10 + // + // Enables the regional PD feature on GCE. 
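The feature constants in this file are only half of the mechanism: the init function further down registers defaultKubernetesFeatureGates with the shared DefaultFeatureGate, after which any component can test a gate at runtime. A hedged sketch of typical consumption, assuming the FeatureGate interface of the vendored k8s.io/apiserver/pkg/util/feature package (which exposes Set and Enabled):

    package main

    import (
    	"fmt"

    	utilfeature "k8s.io/apiserver/pkg/util/feature"
    	"k8s.io/kubernetes/pkg/features" // import runs init(), registering the gates
    )

    func main() {
    	// Gates are normally flipped via the --feature-gates flag; Set
    	// parses the same comma-separated key=value syntax.
    	if err := utilfeature.DefaultFeatureGate.Set("DevicePlugins=true,PodPriority=true"); err != nil {
    		fmt.Println("bad gate spec:", err)
    		return
    	}
    	if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
    		fmt.Println("pod priority is enabled")
    	}
    }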
+ GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk" + + // owner: @krmayankk + // alpha: v1.10 + // + // Enables control over the primary group ID of containers' init processes. + RunAsGroup utilfeature.Feature = "RunAsGroup" + + // owner: @saad-ali + // ga + // + // Allow mounting a subpath of a volume in a container + // Do not remove this feature gate even though it's GA + VolumeSubpath utilfeature.Feature = "VolumeSubpath" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + AppArmor: {Default: true, PreRelease: utilfeature.Beta}, + DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha}, + ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, + ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, + Accelerators: {Default: false, PreRelease: utilfeature.Alpha}, + DevicePlugins: {Default: true, PreRelease: utilfeature.Beta}, + TaintBasedEvictions: {Default: false, PreRelease: utilfeature.Alpha}, + RotateKubeletServerCertificate: {Default: false, PreRelease: utilfeature.Alpha}, + RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta}, + PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta}, + LocalStorageCapacityIsolation: {Default: true, PreRelease: utilfeature.Beta}, + HugePages: {Default: true, PreRelease: utilfeature.Beta}, + DebugContainers: {Default: false, PreRelease: utilfeature.Alpha}, + PodShareProcessNamespace: {Default: false, PreRelease: utilfeature.Alpha}, + PodPriority: {Default: false, PreRelease: utilfeature.Alpha}, + EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha}, + TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha}, + MountPropagation: {Default: true, PreRelease: utilfeature.Beta}, + ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + CPUManager: {Default: true, PreRelease: utilfeature.Beta}, + ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, + CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, + CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, + BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.Beta}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.Beta}, + SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, + HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, + ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, + TokenRequest: {Default: false, PreRelease: utilfeature.Alpha}, + CRIContainerLogRotation: {Default: false, PreRelease: utilfeature.Alpha}, + GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, + RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, + + // inherited features from generic 
apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, + genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta}, + genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, + + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceSubresources: {Default: false, PreRelease: utilfeature.Alpha}, + + // features that enable backwards compatibility but are scheduled to be removed + ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated}, + ReadOnlyAPIDataVolumes: {Default: true, PreRelease: utilfeature.Deprecated}, +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/BUILD b/vendor/k8s.io/kubernetes/pkg/util/BUILD new file mode 100644 index 00000000..1622fe71 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/BUILD @@ -0,0 +1,77 @@ +package(default_visibility = ["//visibility:public"]) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/util/async:all-srcs", + "//pkg/util/bandwidth:all-srcs", + "//pkg/util/config:all-srcs", + "//pkg/util/configz:all-srcs", + "//pkg/util/conntrack:all-srcs", + "//pkg/util/dbus:all-srcs", + "//pkg/util/ebtables:all-srcs", + "//pkg/util/env:all-srcs", + "//pkg/util/file:all-srcs", + "//pkg/util/filesystem:all-srcs", + "//pkg/util/flag:all-srcs", + "//pkg/util/flock:all-srcs", + "//pkg/util/goroutinemap:all-srcs", + "//pkg/util/hash:all-srcs", + "//pkg/util/initsystem:all-srcs", + "//pkg/util/interrupt:all-srcs", + "//pkg/util/io:all-srcs", + "//pkg/util/ipconfig:all-srcs", + "//pkg/util/ipset:all-srcs", + "//pkg/util/iptables:all-srcs", + "//pkg/util/ipvs:all-srcs", + "//pkg/util/keymutex:all-srcs", + "//pkg/util/labels:all-srcs", + "//pkg/util/limitwriter:all-srcs", + "//pkg/util/maps:all-srcs", + "//pkg/util/metrics:all-srcs", + "//pkg/util/mount:all-srcs", + "//pkg/util/net:all-srcs", + "//pkg/util/netsh:all-srcs", + "//pkg/util/node:all-srcs", + "//pkg/util/normalizer:all-srcs", + "//pkg/util/nsenter:all-srcs", + "//pkg/util/oom:all-srcs", + "//pkg/util/parsers:all-srcs", + "//pkg/util/pointer:all-srcs", + "//pkg/util/procfs:all-srcs", + "//pkg/util/reflector/prometheus:all-srcs", + "//pkg/util/removeall:all-srcs", + "//pkg/util/resizefs:all-srcs", + "//pkg/util/resourcecontainer:all-srcs", + "//pkg/util/rlimit:all-srcs", + "//pkg/util/selinux:all-srcs", + "//pkg/util/slice:all-srcs", + "//pkg/util/strings:all-srcs", + "//pkg/util/sysctl:all-srcs", + "//pkg/util/system:all-srcs", + "//pkg/util/tail:all-srcs", + "//pkg/util/taints:all-srcs", + "//pkg/util/template:all-srcs", + "//pkg/util/term:all-srcs", + "//pkg/util/threading:all-srcs", + "//pkg/util/tolerations:all-srcs", + "//pkg/util/version:all-srcs", + "//pkg/util/workqueue/prometheus:all-srcs", + ], + tags = ["automanaged"], +) + +sh_test( + name = "verify-util-pkg", + size = "small", + srcs = ["verify-util-pkg.sh"], + data = glob(["*.go"]), +) diff --git 
diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD
new file mode 100644
index 00000000..1e198dbb
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD
@@ -0,0 +1,48 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["node.go"],
+    importpath = "k8s.io/kubernetes/pkg/util/node",
+    deps = [
+        "//pkg/apis/core:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["node_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/kubelet/apis:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/vendor/k8s.io/kubernetes/pkg/util/node/node.go
new file mode 100644
index 00000000..6bc3ea8a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/node/node.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	clientset "k8s.io/client-go/kubernetes"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+)
+
+const (
+	// The reason and message set on a pod when its state cannot be confirmed as kubelet is unresponsive
+	// on the node it is (was) running.
+	NodeUnreachablePodReason  = "NodeLost"
+	NodeUnreachablePodMessage = "Node %v which was running pod %v is unresponsive"
+)
+
+// GetHostname returns the OS's hostname if 'hostnameOverride' is empty; otherwise, it returns 'hostnameOverride'.
+func GetHostname(hostnameOverride string) string {
+	hostname := hostnameOverride
+	if hostname == "" {
+		nodename, err := os.Hostname()
+		if err != nil {
+			glog.Fatalf("Couldn't determine hostname: %v", err)
+		}
+		hostname = nodename
+	}
+	return strings.ToLower(strings.TrimSpace(hostname))
+}
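A quick sketch of how a caller would use GetHostname (hypothetical program, not part of the patch; only the vendored package above is assumed):

package main

import (
	"fmt"

	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

func main() {
	// Empty override: falls back to os.Hostname(), lower-cased and trimmed.
	fmt.Println(nodeutil.GetHostname(""))

	// An explicit override is normalized the same way.
	fmt.Println(nodeutil.GetHostname("  Worker-0  ")) // prints "worker-0"
}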
+// GetPreferredNodeAddress returns the address of the provided node, using the provided preference order.
+// If none of the preferred address types are found, an error is returned.
+func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddressType) (string, error) {
+	for _, addressType := range preferredAddressTypes {
+		for _, address := range node.Status.Addresses {
+			if address.Type == addressType {
+				return address.Address, nil
+			}
+		}
+		// If hostname was requested and no Hostname address was registered...
+		if addressType == v1.NodeHostName {
+			// ...fall back to the kubernetes.io/hostname label for compatibility with kubelets before 1.5
+			if hostname, ok := node.Labels[kubeletapis.LabelHostname]; ok && len(hostname) > 0 {
+				return hostname, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("no preferred addresses found; known addresses: %v", node.Status.Addresses)
+}
+
+// GetNodeHostIP returns the provided node's IP, based on the priority:
+// 1. NodeInternalIP
+// 2. NodeExternalIP
+func GetNodeHostIP(node *v1.Node) (net.IP, error) {
+	addresses := node.Status.Addresses
+	addressMap := make(map[v1.NodeAddressType][]v1.NodeAddress)
+	for i := range addresses {
+		addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])
+	}
+	if addresses, ok := addressMap[v1.NodeInternalIP]; ok {
+		return net.ParseIP(addresses[0].Address), nil
+	}
+	if addresses, ok := addressMap[v1.NodeExternalIP]; ok {
+		return net.ParseIP(addresses[0].Address), nil
+	}
+	return nil, fmt.Errorf("host IP unknown; known addresses: %v", addresses)
+}
+
+// InternalGetNodeHostIP returns the provided node's IP, based on the priority:
+// 1. NodeInternalIP
+// 2. NodeExternalIP
+func InternalGetNodeHostIP(node *api.Node) (net.IP, error) {
+	addresses := node.Status.Addresses
+	addressMap := make(map[api.NodeAddressType][]api.NodeAddress)
+	for i := range addresses {
+		addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])
+	}
+	if addresses, ok := addressMap[api.NodeInternalIP]; ok {
+		return net.ParseIP(addresses[0].Address), nil
+	}
+	if addresses, ok := addressMap[api.NodeExternalIP]; ok {
+		return net.ParseIP(addresses[0].Address), nil
+	}
+	return nil, fmt.Errorf("host IP unknown; known addresses: %v", addresses)
+}
+
+// GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone;
+// it returns empty-string for no zone.
+func GetZoneKey(node *v1.Node) string {
+	labels := node.Labels
+	if labels == nil {
+		return ""
+	}
+
+	region, _ := labels[kubeletapis.LabelZoneRegion]
+	failureDomain, _ := labels[kubeletapis.LabelZoneFailureDomain]
+
+	if region == "" && failureDomain == "" {
+		return ""
+	}
+
+	// We include the null character just in case region or failureDomain has a colon
+	// (We do assume there's no null characters in a region or failureDomain)
+	// As a nice side-benefit, the null character is not printed by fmt.Print or glog
+	return region + ":\x00:" + failureDomain
+}
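The address helpers above are easiest to see against a fabricated node; a minimal sketch (node contents invented for illustration):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

func main() {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-0"},
		Status: v1.NodeStatus{Addresses: []v1.NodeAddress{
			{Type: v1.NodeExternalIP, Address: "203.0.113.10"},
			{Type: v1.NodeInternalIP, Address: "10.0.0.5"},
		}},
	}

	// The preference order wins over the order in node.Status.Addresses:
	// InternalIP is returned even though ExternalIP is listed first.
	addr, err := nodeutil.GetPreferredNodeAddress(node, []v1.NodeAddressType{
		v1.NodeInternalIP, v1.NodeHostName,
	})
	fmt.Println(addr, err) // 10.0.0.5 <nil>

	// GetNodeHostIP applies its fixed InternalIP-then-ExternalIP priority.
	ip, _ := nodeutil.GetNodeHostIP(node)
	fmt.Println(ip) // 10.0.0.5
}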
+// SetNodeCondition updates specific node condition with patch operation.
+func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
+	generatePatch := func(condition v1.NodeCondition) ([]byte, error) {
+		raw, err := json.Marshal(&[]v1.NodeCondition{condition})
+		if err != nil {
+			return nil, err
+		}
+		return []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw)), nil
+	}
+	condition.LastHeartbeatTime = metav1.NewTime(time.Now())
+	patch, err := generatePatch(condition)
+	if err != nil {
+		return err
+	}
+	_, err = c.CoreV1().Nodes().PatchStatus(string(node), patch)
+	return err
+}
+
+// PatchNodeCIDR patches the specified node's CIDR to the given value.
+func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error {
+	raw, err := json.Marshal(cidr)
+	if err != nil {
+		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
+	}
+
+	patchBytes := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%s}}`, raw))
+
+	if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil {
+		return fmt.Errorf("failed to patch node CIDR: %v", err)
+	}
+	return nil
+}
+
+// PatchNodeStatus patches node status.
+func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) {
+	patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	updatedNode, err := c.Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status")
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
+	}
+	return updatedNode, patchBytes, nil
+}
+
+func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) {
+	oldData, err := json.Marshal(oldNode)
+	if err != nil {
+		return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err)
+	}
+
+	// Reset spec to make sure only patch for Status or ObjectMeta is generated.
+	// Note that we don't reset ObjectMeta here, because:
+	// 1. This aligns with Nodes().UpdateStatus().
+	// 2. Some component does use this to update node annotations.
+	newNode.Spec = oldNode.Spec
+	newData, err := json.Marshal(newNode)
+	if err != nil {
+		return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err)
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err)
+	}
+	return patchBytes, nil
+}
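The patch helpers above can be exercised without a cluster via client-go's fake clientset; a sketch under that assumption (node name and condition values are invented for illustration):

package main

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

func main() {
	// Seed the fake clientset with one node so the patches have a target.
	client := fake.NewSimpleClientset(&v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-0"},
	})

	// SetNodeCondition stamps LastHeartbeatTime itself, then patches the
	// node's status subresource with the marshaled condition.
	err := nodeutil.SetNodeCondition(client, "worker-0", v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		Reason:             "KubeletReady",
		LastTransitionTime: metav1.NewTime(time.Now()),
	})
	fmt.Println(err) // <nil>

	// PatchNodeCIDR follows the same pattern for spec.podCIDR.
	fmt.Println(nodeutil.PatchNodeCIDR(client, "worker-0", "10.244.0.0/24"))
}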
diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/node_test.go b/vendor/k8s.io/kubernetes/pkg/util/node/node_test.go
new file mode 100644
index 00000000..b59f355a
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/node/node_test.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+	"testing"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+)
+
+func TestGetPreferredAddress(t *testing.T) {
+	testcases := map[string]struct {
+		Labels      map[string]string
+		Addresses   []v1.NodeAddress
+		Preferences []v1.NodeAddressType
+
+		ExpectErr     string
+		ExpectAddress string
+	}{
+		"no addresses": {
+			ExpectErr: "no preferred addresses found; known addresses: []",
+		},
+		"missing address": {
+			Addresses: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
+			},
+			Preferences: []v1.NodeAddressType{v1.NodeHostName},
+			ExpectErr:   "no preferred addresses found; known addresses: [{InternalIP 1.2.3.4}]",
+		},
+		"found address": {
+			Addresses: []v1.NodeAddress{
+				{Type: v1.NodeInternalIP, Address: "1.2.3.4"},
+				{Type: v1.NodeExternalIP, Address: "1.2.3.5"},
+				{Type: v1.NodeExternalIP, Address: "1.2.3.7"},
+			},
+			Preferences:   []v1.NodeAddressType{v1.NodeHostName, v1.NodeExternalIP},
+			ExpectAddress: "1.2.3.5",
+		},
+		"found hostname address": {
+			Labels: map[string]string{kubeletapis.LabelHostname: "label-hostname"},
+			Addresses: []v1.NodeAddress{
+				{Type: v1.NodeExternalIP, Address: "1.2.3.5"},
+				{Type: v1.NodeHostName, Address: "status-hostname"},
+			},
+			Preferences:   []v1.NodeAddressType{v1.NodeHostName, v1.NodeExternalIP},
+			ExpectAddress: "status-hostname",
+		},
+		"found label address": {
+			Labels: map[string]string{kubeletapis.LabelHostname: "label-hostname"},
+			Addresses: []v1.NodeAddress{
+				{Type: v1.NodeExternalIP, Address: "1.2.3.5"},
+			},
+			Preferences:   []v1.NodeAddressType{v1.NodeHostName, v1.NodeExternalIP},
+			ExpectAddress: "label-hostname",
+		},
+	}
+
+	for k, tc := range testcases {
+		node := &v1.Node{
+			ObjectMeta: metav1.ObjectMeta{Labels: tc.Labels},
+			Status:     v1.NodeStatus{Addresses: tc.Addresses},
+		}
+		address, err := GetPreferredNodeAddress(node, tc.Preferences)
+		errString := ""
+		if err != nil {
+			errString = err.Error()
+		}
+		if errString != tc.ExpectErr {
+			t.Errorf("%s: expected err=%q, got %q", k, tc.ExpectErr, errString)
+		}
+		if address != tc.ExpectAddress {
+			t.Errorf("%s: expected address=%q, got %q", k, tc.ExpectAddress, address)
+		}
+	}
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/util/verify-util-pkg.sh b/vendor/k8s.io/kubernetes/pkg/util/verify-util-pkg.sh
new file mode 100755
index 00000000..755924a0
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/util/verify-util-pkg.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# verify-util-pkg.sh checks whether *.go except doc.go in pkg/util have been moved into
+# sub-pkgs, see issue #15634.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+BASH_DIR=$(dirname "${BASH_SOURCE}")
+
+find_go_files() {
+    find . -maxdepth 1 -not \( \
+        \( \
+            -wholename './doc.go' \
+        \) -prune \
+    \) -name '*.go'
+}
+
+ret=0
+
+pushd "${BASH_DIR}" > /dev/null
+    for path in `find_go_files`; do
+        file=$(basename $path)
+        echo "Found pkg/util/${file}, but should be moved into util sub-pkgs." 1>&2
+        ret=1
+    done
+popd > /dev/null
+
+if [[ ${ret} > 0 ]]; then
+    exit ${ret}
+fi
+
+echo "Util Package Verified."