Added conditions to match the HCO requirements. (#910)

Signed-off-by: Alexander Wels <awels@redhat.com>
This commit is contained in:
Alexander Wels 2019-08-28 18:36:44 -04:00 committed by GitHub
parent be7d1cb9be
commit 45eecea14e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
910 changed files with 256266 additions and 2196 deletions

View File

@ -1694,6 +1694,49 @@
}
}
},
"v1.Condition": {
"required": [
"type",
"status",
"lastHeartbeatTime",
"lastTransitionTime"
],
"properties": {
"lastHeartbeatTime": {
"description": "last time we got an update on a given condition",
"type": "string"
},
"lastProbeTime": {
"type": [
"string",
"null"
]
},
"lastTransitionTime": {
"description": "last time the condition transit from one status to another",
"type": [
"string",
"null"
]
},
"message": {
"description": "human-readable message indicating details about last transition",
"type": "string"
},
"reason": {
"description": "one-word CamelCase reason for the condition's last transition",
"type": "string"
},
"status": {
"description": "status of the condition, one of True, False, Unknown",
"type": "string"
},
"type": {
"description": "type of condition ie. Available|Progressing|Degraded.",
"type": "string"
}
}
},
"v1.DeleteOptions": {
"description": "DeleteOptions may be provided when deleting an API object.",
"properties": {
@ -2215,39 +2258,6 @@
}
}
},
"v1alpha1.CDICondition": {
"description": "CDICondition represents a condition of a CDI deployment",
"required": [
"type",
"status"
],
"properties": {
"lastProbeTime": {
"type": [
"string",
"null"
]
},
"lastTransitionTime": {
"type": [
"string",
"null"
]
},
"message": {
"type": "string"
},
"reason": {
"type": "string"
},
"status": {
"type": "string"
},
"type": {
"type": "string"
}
}
},
"v1alpha1.CDIList": {
"description": "CDIList provides the needed parameters to do request a list of CDIs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
"required": [
@ -2295,7 +2305,7 @@
"conditions": {
"type": "array",
"items": {
"$ref": "#/definitions/v1alpha1.CDICondition"
"$ref": "#/definitions/v1.Condition"
}
},
"observedVersion": {

View File

@ -46,11 +46,11 @@ function wait_cdi_crd_installed {
while [ $crd_defined -eq 0 ] && [ $timeout > 0 ]; do
crd_defined=$(_kubectl get customresourcedefinition| grep cdis.cdi.kubevirt.io | wc -l)
sleep 1
timeout=timeout-1
timeout=$(($timeout-1))
done
#In case CDI crd is not defined after 120s - throw error
if [ $timeout \< 1 ]; then
if [ $crd_defined -eq 0 ]; then
echo "ERROR - CDI CRD is not defined after timeout"
exit 1
fi

View File

@ -16,6 +16,7 @@ CDI_INSTALL_OLM="install-olm"
CDI_INSTALL=${CDI_INSTALL:-${CDI_INSTALL_OPERATOR}}
CDI_INSTALL_TIMEOUT=${CDI_INSTALL_TIMEOUT:-120}
CDI_NAMESPACE=${CDI_NAMESPACE:-cdi}
CDI_INSTALL_TIMEOUT=${CDI_INSTALL_TIMEOUT:-120}
# Set controller verbosity to 3 for functional tests.
export VERBOSITY=3
@ -50,7 +51,7 @@ install_cdi
wait_cdi_crd_installed $CDI_INSTALL_TIMEOUT
_kubectl apply -f "./_out/manifests/release/cdi-cr.yaml"
_kubectl wait cdis.cdi.kubevirt.io/cdi --for=condition=running --timeout=120s
_kubectl wait cdis.cdi.kubevirt.io/cdi --for=condition=Available --timeout=120s
# Start functional test HTTP server.

30
glide.lock generated
View File

@ -1,8 +1,8 @@
hash: 3b7f147f88ea81ed85e278d641744b9540e6a601f9872e5151f488299bbaa167
updated: 2019-06-27T09:01:47.768925038Z
hash: 6d9aa23aeb785737d22d17f3836b7e46f6d3cd439d2b873712a606f79bfb2643
updated: 2019-08-27T17:27:30.454287513Z
imports:
- name: github.com/appscode/jsonpatch
version: e8422f09d27ee2c8cfb2c7f8089eb9eeb0764849
version: 7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2
- name: github.com/asaskevich/govalidator
version: f9ffefc3facfbe0caee3fea233cbb6e8208f4541
- name: github.com/beorn7/perks
@ -35,11 +35,11 @@ imports:
- bson
- internal/json
- name: github.com/go-ini/ini
version: 3be5ad479f69d4e08d7fe25edf79bf3346bd658e
version: 8fe474341f7eedd6804eda75896c8f3e4b5dc36a
- name: github.com/go-logr/logr
version: 9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e
version: d63354a31b29a1af26833a7648428060001b5049
- name: github.com/go-logr/zapr
version: 03f06a783fbb7dfaf3f629c7825480e43a7105e6
version: 2e515ec1daf7eefda8c24da3185e3967b306f957
- name: github.com/go-openapi/analysis
version: c701774f4e604d952e4e8c56dee260be696e33c3
subpackages:
@ -80,7 +80,7 @@ imports:
subpackages:
- lru
- name: github.com/golang/mock
version: 140ae90f29eaaadc229f8d4b78f074b7f0611c1e
version: dd8d2a22370e4c8a334e80ca8477f71356c8e4bb
subpackages:
- gomock
- name: github.com/golang/protobuf
@ -102,7 +102,7 @@ imports:
- compiler
- extensions
- name: github.com/gorilla/mux
version: ed099d42384823742bba0bf9a72b53b55c9e2e38
version: 00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15
- name: github.com/gregjones/httpcache
version: 787624de3eb7bd915c329cba748687a3b22666a6
subpackages:
@ -178,7 +178,7 @@ imports:
- reporters/stenographer/support/go-isatty
- types
- name: github.com/onsi/gomega
version: efe19c39ca106ee4ed42a1b25e1a80a53be4831a
version: beea7276191eede26d9cd14df7ab50f0a5c9f25f
subpackages:
- format
- gbytes
@ -214,6 +214,10 @@ imports:
- security/clientset/versioned
- security/clientset/versioned/scheme
- security/clientset/versioned/typed/security/v1
- name: github.com/openshift/custom-resource-status
version: e62f2f3b79f33792b153e767811f420c1d457b31
subpackages:
- conditions/v1
- name: github.com/operator-framework/go-appr
version: f2aef88446f2a736fcb18db8ae57d708c52fdad0
subpackages:
@ -350,7 +354,7 @@ imports:
subpackages:
- rate
- name: google.golang.org/appengine
version: b2f4a3cf3c67576a2ee09e1fe62656a5086ce880
version: fb139bde60fa77cede04f226b4d5a3cf68dcce27
subpackages:
- internal
- internal/base
@ -368,6 +372,7 @@ imports:
subpackages:
- cipher
- json
- jwt
- name: gopkg.in/tomb.v1
version: c131134a1947e9afd9cecfe11f4c6dff0732ae58
- name: gopkg.in/yaml.v2
@ -457,6 +462,7 @@ imports:
- pkg/util/httpstream/spdy
- pkg/util/intstr
- pkg/util/json
- pkg/util/jsonmergepatch
- pkg/util/mergepatch
- pkg/util/naming
- pkg/util/net
@ -662,7 +668,7 @@ imports:
- name: k8s.io/code-generator
version: c2090bec4d9b1fb25de3812f868accc2bc9ecbae
- name: k8s.io/gengo
version: e17681d19d3ac4837a019ece36c2a0ec31ffe985
version: a874a240740c2ae854082ec73d46c5efcedd2149
- name: k8s.io/klog
version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
- name: k8s.io/kube-aggregator
@ -725,7 +731,7 @@ imports:
- pkg/webhook/internal/metrics
- pkg/webhook/types
- name: sigs.k8s.io/testing_frameworks
version: 57f07443c2d47d9c7f932a9a4fc87fbf3408215d
version: b6c33f574b5885e28314cb115cec0d3888e78ef1
subpackages:
- integration
- integration/addr

View File

@ -64,3 +64,7 @@ import:
version: rebase-1.13.4
- package: github.com/kubernetes-csi/external-snapshotter/pkg/apis
version: e49856eb417cbafa51e5a3fb3bd0ac9e31ab1873
- package: github.com/openshift/custom-resource-status
version: e62f2f3b79f33792b153e767811f420c1d457b31
- package: github.com/appscode/jsonpatch
version: release-1.0

View File

@ -21,7 +21,8 @@ limitations under the License.
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
v1 "github.com/openshift/custom-resource-status/conditions/v1"
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -53,24 +54,6 @@ func (in *CDI) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): generated by deepcopy-gen — do not edit by hand; regenerate instead.
func (in *CDICondition) DeepCopyInto(out *CDICondition) {
	// Shallow-copy every value field first, then deep-copy the two Time
	// fields via their own DeepCopyInto methods.
	*out = *in
	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDICondition.
// Returns nil when the receiver is nil so callers can chain it safely.
// NOTE(review): generated by deepcopy-gen — do not edit by hand; regenerate instead.
func (in *CDICondition) DeepCopy() *CDICondition {
	if in == nil {
		return nil
	}
	out := new(CDICondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CDIConfig) DeepCopyInto(out *CDIConfig) {
*out = *in
@ -233,7 +216,7 @@ func (in *CDIStatus) DeepCopyInto(out *CDIStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]CDICondition, len(*in))
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@ -460,7 +443,7 @@ func (in *DataVolumeSpec) DeepCopyInto(out *DataVolumeSpec) {
in.Source.DeepCopyInto(&out.Source)
if in.PVC != nil {
in, out := &in.PVC, &out.PVC
*out = new(v1.PersistentVolumeClaimSpec)
*out = new(corev1.PersistentVolumeClaimSpec)
(*in).DeepCopyInto(*out)
}
return

View File

@ -30,7 +30,6 @@ import (
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
return map[string]common.OpenAPIDefinition{
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDI": schema_pkg_apis_core_v1alpha1_CDI(ref),
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDICondition": schema_pkg_apis_core_v1alpha1_CDICondition(ref),
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDIConfig": schema_pkg_apis_core_v1alpha1_CDIConfig(ref),
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDIConfigList": schema_pkg_apis_core_v1alpha1_CDIConfigList(ref),
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDIConfigSpec": schema_pkg_apis_core_v1alpha1_CDIConfigSpec(ref),
@ -97,56 +96,6 @@ func schema_pkg_apis_core_v1alpha1_CDI(ref common.ReferenceCallback) common.Open
}
}
// schema_pkg_apis_core_v1alpha1_CDICondition returns the OpenAPI definition for
// the CDICondition type.
// NOTE(review): generated by openapi-gen — do not edit by hand; regenerate instead.
func schema_pkg_apis_core_v1alpha1_CDICondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CDICondition represents a condition of a CDI deployment",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					// The timestamp fields reference the shared metav1.Time schema
					// rather than declaring an inline type.
					"lastProbeTime": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"lastTransitionTime": {
						SchemaProps: spec.SchemaProps{
							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
				},
				// Only type and status are mandatory; the remaining fields are optional.
				Required: []string{"type", "status"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
func schema_pkg_apis_core_v1alpha1_CDIConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@ -388,7 +337,7 @@ func schema_pkg_apis_core_v1alpha1_CDIStatus(ref common.ReferenceCallback) commo
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: ref("kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDICondition"),
Ref: ref("github.com/openshift/custom-resource-status/conditions/v1.Condition"),
},
},
},
@ -416,7 +365,7 @@ func schema_pkg_apis_core_v1alpha1_CDIStatus(ref common.ReferenceCallback) commo
},
},
Dependencies: []string{
"kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1.CDICondition"},
"github.com/openshift/custom-resource-status/conditions/v1.Condition"},
}
}

View File

@ -23,6 +23,8 @@ package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conditions "github.com/openshift/custom-resource-status/conditions/v1"
)
// DataVolume provides a representation of our data volume
@ -204,11 +206,11 @@ type CDIPhase string
// CDIStatus defines the status of the CDI installation
type CDIStatus struct {
Phase CDIPhase `json:"phase,omitempty"`
Conditions []CDICondition `json:"conditions,omitempty" optional:"true"`
OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"`
TargetVersion string `json:"targetVersion,omitempty" optional:"true"`
ObservedVersion string `json:"observedVersion,omitempty" optional:"true"`
Phase CDIPhase `json:"phase,omitempty"`
Conditions []conditions.Condition `json:"conditions,omitempty" optional:"true"`
OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"`
TargetVersion string `json:"targetVersion,omitempty" optional:"true"`
ObservedVersion string `json:"observedVersion,omitempty" optional:"true"`
}
const (
@ -226,24 +228,9 @@ const (
// CDIPhaseError signals that the CDI deployment is in an error state
CDIPhaseError CDIPhase = "Error"
)
// CDICondition represents a condition of a CDI deployment
type CDICondition struct {
Type CDIConditionType `json:"type"`
Status corev1.ConditionStatus `json:"status"`
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
Reason string `json:"reason,omitempty"`
Message string `json:"message,omitempty"`
}
// CDIConditionType is the type of CDI condition
type CDIConditionType string
const (
// CDIConditionRunning means the CDI deployment is up/ready/healthy
CDIConditionRunning CDIConditionType = "Running"
// CDIPhaseUpgrading signals that the CDI resources are being deployed
CDIPhaseUpgrading CDIPhase = "Upgrading"
)
//CDIList provides the needed parameters to do request a list of CDIs from the system

View File

@ -99,12 +99,6 @@ func (CDIStatus) SwaggerDoc() map[string]string {
}
}
// SwaggerDoc returns the swagger documentation map for CDICondition; the empty
// key documents the type itself rather than any individual field.
// NOTE(review): generated swagger docs — regenerate rather than editing by hand.
func (CDICondition) SwaggerDoc() map[string]string {
	return map[string]string{
		"": "CDICondition represents a condition of a CDI deployment",
	}
}
func (CDIList) SwaggerDoc() map[string]string {
return map[string]string{
"": "CDIList provides the needed parameters to do request a list of CDIs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",

View File

@ -46,6 +46,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/kelseyhightower/envconfig"
conditions "github.com/openshift/custom-resource-status/conditions/v1"
cdiv1alpha1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
"kubevirt.io/containerized-data-importer/pkg/operator"
@ -159,11 +160,16 @@ func (r *ReconcileCDI) Reconcile(request reconcile.Request) (reconcile.Result, e
// let's try to create stuff
if cr.Status.Phase == "" {
reqLogger.Info("Doing reconcile create")
return r.reconcileCreate(reqLogger, cr)
res, createErr := r.reconcileCreate(reqLogger, cr)
// Always update conditions after a create.
err = r.client.Update(context.TODO(), cr)
if err != nil {
return reconcile.Result{}, err
}
return res, createErr
}
reqLogger.Info("Reconciling to error state, no configmap")
// we are in a weird state
return r.reconcileError(reqLogger, cr)
}
@ -171,14 +177,27 @@ func (r *ReconcileCDI) Reconcile(request reconcile.Request) (reconcile.Result, e
// do we even care about this CR?
if !metav1.IsControlledBy(configMap, cr) {
reqLogger.Info("Reconciling to error state, unwanted CDI object")
return r.reconcileError(reqLogger, cr)
}
currentConditionValues := GetConditionValues(cr.Status.Conditions)
reqLogger.Info("Doing reconcile update")
// should be the usual case
return r.reconcileUpdate(reqLogger, cr)
existingAvailableCondition := conditions.FindStatusCondition(cr.Status.Conditions, conditions.ConditionAvailable)
if existingAvailableCondition != nil {
// should be the usual case
MarkCrHealthyMessage(cr, existingAvailableCondition.Reason, existingAvailableCondition.Message)
} else {
MarkCrHealthyMessage(cr, "", "")
}
res, err := r.reconcileUpdate(reqLogger, cr)
if conditionsChanged(currentConditionValues, GetConditionValues(cr.Status.Conditions)) {
if err := r.crUpdate(cr.Status.Phase, cr); err != nil {
return reconcile.Result{}, err
}
}
return res, err
}
func shouldTakeUpdatePath(logger logr.Logger, targetVersion, currentVersion string) (bool, error) {
@ -219,14 +238,17 @@ func shouldTakeUpdatePath(logger logr.Logger, targetVersion, currentVersion stri
}
func (r *ReconcileCDI) reconcileCreate(logger logr.Logger, cr *cdiv1alpha1.CDI) (reconcile.Result, error) {
MarkCrDeploying(cr, "DeployStarted", "Started Deployment")
// claim the configmap
if err := r.createConfigMap(cr); err != nil {
MarkCrFailed(cr, "ConfigError", "Unable to claim ConfigMap")
return reconcile.Result{}, err
}
logger.Info("ConfigMap created successfully")
if err := r.crInit(cr); err != nil {
MarkCrFailed(cr, "CrInitError", "Unable to Initialize CR")
return reconcile.Result{}, err
}
@ -251,9 +273,9 @@ func (r *ReconcileCDI) checkUpgrade(logger logr.Logger, cr *cdiv1alpha1.CDI) err
if isUpgrade && !r.isUpgrading(cr) {
logger.Info("Observed version is not target version. Begin upgrade", "Observed version ", cr.Status.ObservedVersion, "TargetVersion", r.namespacedArgs.DockerTag)
MarkCrUpgradeHealingDegraded(cr, "UpgradeStarted", fmt.Sprintf("Started upgrade to version %s", r.namespacedArgs.DockerTag))
cr.Status.TargetVersion = r.namespacedArgs.DockerTag
//Here phase has to be upgrading - this is to be handled in dedicated pr
if err := r.crUpdate(cdiv1alpha1.CDIPhaseDeploying, cr); err != nil {
if err := r.crUpdate(cdiv1alpha1.CDIPhaseUpgrading, cr); err != nil {
return err
}
}
@ -371,6 +393,7 @@ func (r *ReconcileCDI) reconcileUpdate(logger logr.Logger, cr *cdiv1alpha1.CDI)
if cr.Status.Phase != cdiv1alpha1.CDIPhaseDeployed && !r.isUpgrading(cr) {
//We are not moving to Deployed phase until new operator deployment is ready in case of Upgrade
cr.Status.ObservedVersion = r.namespacedArgs.DockerTag
MarkCrHealthyMessage(cr, "DeployCompleted", "Deployment Completed")
if err = r.crUpdate(cdiv1alpha1.CDIPhaseDeployed, cr); err != nil {
return reconcile.Result{}, err
}
@ -378,20 +401,16 @@ func (r *ReconcileCDI) reconcileUpdate(logger logr.Logger, cr *cdiv1alpha1.CDI)
logger.Info("Successfully entered Deployed state")
}
ready, err := r.checkReady(logger, cr)
degraded, err := r.checkDegraded(logger, cr)
if err != nil {
return reconcile.Result{}, err
}
if ready {
logger.Info("Operator is ready!!")
if !degraded && r.isUpgrading(cr) {
logger.Info("Completing upgrade process...")
if r.isUpgrading(cr) {
logger.Info("Completing upgrade process...")
if err = r.completeUpgrade(logger, cr); err != nil {
return reconcile.Result{}, err
}
if err = r.completeUpgrade(logger, cr); err != nil {
return reconcile.Result{}, err
}
}
@ -406,6 +425,7 @@ func (r *ReconcileCDI) completeUpgrade(logger logr.Logger, cr *cdiv1alpha1.CDI)
previousVersion := cr.Status.ObservedVersion
cr.Status.ObservedVersion = r.namespacedArgs.DockerTag
MarkCrHealthyMessage(cr, "DeployCompleted", "Deployment Completed")
if err := r.crUpdate(cdiv1alpha1.CDIPhaseDeployed, cr); err != nil {
return err
}
@ -530,6 +550,10 @@ func (r *ReconcileCDI) reconcileDelete(logger logr.Logger, cr *cdiv1alpha1.CDI)
}
func (r *ReconcileCDI) reconcileError(logger logr.Logger, cr *cdiv1alpha1.CDI) (reconcile.Result, error) {
MarkCrFailed(cr, "ConfigError", "ConfigMap not owned by cr")
if err := r.crUpdate(cr.Status.Phase, cr); err != nil {
return reconcile.Result{}, err
}
if err := r.crError(cr); err != nil {
return reconcile.Result{}, err
}
@ -537,34 +561,43 @@ func (r *ReconcileCDI) reconcileError(logger logr.Logger, cr *cdiv1alpha1.CDI) (
return reconcile.Result{}, nil
}
func (r *ReconcileCDI) checkReady(logger logr.Logger, cr *cdiv1alpha1.CDI) (bool, error) {
readyCond := conditionReady
func (r *ReconcileCDI) checkDegraded(logger logr.Logger, cr *cdiv1alpha1.CDI) (bool, error) {
degraded := false
deployments, err := r.getAllDeployments(cr)
if err != nil {
return false, err
return true, err
}
for _, deployment := range deployments {
key := client.ObjectKey{Namespace: deployment.Namespace, Name: deployment.Name}
if err = r.client.Get(context.TODO(), key, deployment); err != nil {
return false, err
return true, err
}
if !checkDeploymentReady(deployment) {
readyCond = conditionNotReady
degraded = true
break
}
}
logger.Info("CDI Ready check", "Status", readyCond.Status)
logger.Info("CDI degraded check", "Degraded", degraded)
if err = r.conditionUpdate(readyCond, cr); err != nil {
return false, err
if degraded {
conditions.SetStatusCondition(&cr.Status.Conditions, conditions.Condition{
Type: conditions.ConditionDegraded,
Status: corev1.ConditionTrue,
})
} else {
conditions.SetStatusCondition(&cr.Status.Conditions, conditions.Condition{
Type: conditions.ConditionDegraded,
Status: corev1.ConditionFalse,
})
}
return readyCond == conditionReady, nil
logger.Info("Finished degraded check", "conditions", cr.Status.Conditions)
return degraded, nil
}
func (r *ReconcileCDI) add(mgr manager.Manager) error {
@ -679,6 +712,7 @@ func (r *ReconcileCDI) getAllResources(cr *cdiv1alpha1.CDI) ([]runtime.Object, e
if deployClusterResources() {
crs, err := cdicluster.CreateAllResources(r.clusterArgs)
if err != nil {
MarkCrFailedHealing(cr, "CreateResources", "Unable to create all resources")
return nil, err
}
@ -687,6 +721,7 @@ func (r *ReconcileCDI) getAllResources(cr *cdiv1alpha1.CDI) ([]runtime.Object, e
nsrs, err := cdinamespaced.CreateAllResources(r.getNamespacedArgs(cr))
if err != nil {
MarkCrFailedHealing(cr, "CreateNamespaceResources", "Unable to create all namespaced resources")
return nil, err
}

View File

@ -44,6 +44,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
conditions "github.com/openshift/custom-resource-status/conditions/v1"
extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/client-go/kubernetes/scheme"
@ -159,8 +161,11 @@ var _ = Describe("Controller", func() {
Expect(args.cdi.Status.TargetVersion).Should(Equal(version))
Expect(args.cdi.Status.ObservedVersion).Should(Equal(version))
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseDeployed))
Expect(args.cdi.Status.Conditions).Should(BeEmpty())
Expect(args.cdi.Status.Conditions).Should(HaveLen(3))
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
// We will expect degraded status, because in the test the deployment.status.replicas will not be 1, when the desired is 1.
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
Expect(args.cdi.Finalizers).Should(HaveLen(1))
})
@ -211,6 +216,7 @@ var _ = Describe("Controller", func() {
})
It("should become ready", func() {
one := int32(1)
args := createArgs()
doReconcile(args)
@ -225,6 +231,7 @@ var _ = Describe("Controller", func() {
numReplicas := d.Spec.Replicas
Expect(numReplicas).ToNot(BeNil())
Expect(numReplicas).To(Equal(&one))
d, err := getDeployment(args.client, d)
Expect(err).ToNot(HaveOccurred())
@ -234,7 +241,11 @@ var _ = Describe("Controller", func() {
doReconcile(args)
Expect(args.cdi.Status.Conditions).Should(BeEmpty())
Expect(args.cdi.Status.Conditions).Should(HaveLen(3))
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
// We will expect degraded status, because in the test the deployment.status.replicas will not be 1, when the desired is 1.
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
}
resources, err = getAllResources(args.reconciler)
@ -259,9 +270,10 @@ var _ = Describe("Controller", func() {
doReconcile(args)
if len(args.cdi.Status.Conditions) == 1 &&
args.cdi.Status.Conditions[0].Type == cdiviaplha1.CDIConditionRunning &&
args.cdi.Status.Conditions[0].Status == corev1.ConditionTrue {
if len(args.cdi.Status.Conditions) == 3 &&
conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable) &&
conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing) &&
conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionDegraded) {
running = true
}
}
@ -313,9 +325,10 @@ var _ = Describe("Controller", func() {
doReconcile(args)
Expect(args.cdi.Status.Conditions).Should(HaveLen(1))
Expect(args.cdi.Status.Conditions[0].Type).Should(Equal(cdiviaplha1.CDIConditionRunning))
Expect(args.cdi.Status.Conditions[0].Status).Should(Equal(corev1.ConditionTrue))
Expect(args.cdi.Status.Conditions).Should(HaveLen(3))
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
for _, r := range resources {
var ok bool
@ -333,9 +346,11 @@ var _ = Describe("Controller", func() {
doReconcile(args)
Expect(args.cdi.Status.Conditions).Should(HaveLen(1))
Expect(args.cdi.Status.Conditions[0].Type).Should(Equal(cdiviaplha1.CDIConditionRunning))
Expect(args.cdi.Status.Conditions[0].Status).Should(Equal(corev1.ConditionFalse))
Expect(args.cdi.Status.Conditions).Should(HaveLen(3))
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
// Application should be degraded due to missing deployment pods (set to 0)
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
deployment, err = getDeployment(args.client, deployment)
Expect(err).ToNot(HaveOccurred())
@ -345,9 +360,10 @@ var _ = Describe("Controller", func() {
doReconcile(args)
Expect(args.cdi.Status.Conditions).Should(HaveLen(1))
Expect(args.cdi.Status.Conditions[0].Type).Should(Equal(cdiviaplha1.CDIConditionRunning))
Expect(args.cdi.Status.Conditions[0].Status).Should(Equal(corev1.ConditionTrue))
Expect(args.cdi.Status.Conditions).Should(HaveLen(3))
Expect(conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
})
It("does not modify insecure registry configmap", func() {
@ -399,7 +415,10 @@ var _ = Describe("Controller", func() {
Expect(err).ToNot(HaveOccurred())
Expect(newInstance.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseError))
Expect(newInstance.Status.Conditions).Should(BeEmpty())
Expect(newInstance.Status.Conditions).Should(HaveLen(3))
Expect(conditions.IsStatusConditionFalse(newInstance.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionFalse(newInstance.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
Expect(conditions.IsStatusConditionTrue(newInstance.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
})
It("should succeed when we delete CDI", func() {
@ -494,7 +513,7 @@ var _ = Describe("Controller", func() {
Expect(args.cdi.Status.OperatorVersion).Should(Equal(newVersion))
Expect(args.cdi.Status.ObservedVersion).Should(Equal(prevVersion))
Expect(args.cdi.Status.TargetVersion).Should(Equal(newVersion))
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseDeploying))
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseUpgrading))
} else {
//verify upgraded hasn't started
Expect(args.cdi.Status.OperatorVersion).Should(Equal(prevVersion))
@ -646,7 +665,7 @@ var _ = Describe("Controller", func() {
doReconcile(args)
//verify upgraded has started
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseDeploying))
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseUpgrading))
//change deployment to ready
isReady := setDeploymentsReady(args)
@ -1072,7 +1091,7 @@ var _ = Describe("Controller", func() {
doReconcile(args)
//verify upgraded has started
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseDeploying))
Expect(args.cdi.Status.Phase).Should(Equal(cdiviaplha1.CDIPhaseUpgrading))
//verify unused exists before upgrade is done
_, err = getObject(args.client, unusedObj)
@ -1284,9 +1303,10 @@ func setDeploymentsReady(args *args) bool {
doReconcile(args)
if len(args.cdi.Status.Conditions) == 1 &&
args.cdi.Status.Conditions[0].Type == cdiviaplha1.CDIConditionRunning &&
args.cdi.Status.Conditions[0].Status == corev1.ConditionTrue {
if len(args.cdi.Status.Conditions) == 3 &&
conditions.IsStatusConditionTrue(args.cdi.Status.Conditions, conditions.ConditionAvailable) &&
conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionProgressing) &&
conditions.IsStatusConditionFalse(args.cdi.Status.Conditions, conditions.ConditionDegraded) {
running = true
}
}

View File

@ -18,40 +18,15 @@ package controller
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conditions "github.com/openshift/custom-resource-status/conditions/v1"
cdiv1alpha1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
)
var (
conditionReady = cdiv1alpha1.CDICondition{
Type: cdiv1alpha1.CDIConditionRunning,
Status: corev1.ConditionTrue,
Reason: "All deployments running and ready",
Message: "Have fun!",
}
conditionNotReady = cdiv1alpha1.CDICondition{
Type: cdiv1alpha1.CDIConditionRunning,
Status: corev1.ConditionFalse,
Reason: "CDI deployment state inconsistent",
Message: "Hang in there!",
}
)
func (r *ReconcileCDI) isUpgrading(cr *cdiv1alpha1.CDI) bool {
if cr.Status.ObservedVersion == "" {
return false
}
if cr.Status.ObservedVersion != cr.Status.TargetVersion {
return true
}
return false
return cr.Status.ObservedVersion != "" && cr.Status.ObservedVersion != cr.Status.TargetVersion
}
// this is used for testing. wish this a helper function in test file instead of member
@ -87,37 +62,135 @@ func (r *ReconcileCDI) crUpdate(phase cdiv1alpha1.CDIPhase, cr *cdiv1alpha1.CDI)
return r.client.Update(context.TODO(), cr)
}
func (r *ReconcileCDI) conditionUpdate(condition cdiv1alpha1.CDICondition, cr *cdiv1alpha1.CDI) error {
condition.LastProbeTime = metav1.Time{Time: time.Now()}
condition.LastTransitionTime = condition.LastProbeTime
i := -1
for j, c := range cr.Status.Conditions {
if c.Type == condition.Type {
i = j
break
}
// GetConditionValues gets the conditions and put them into a map for easy comparison
func GetConditionValues(conditionList []conditions.Condition) map[conditions.ConditionType]corev1.ConditionStatus {
result := make(map[conditions.ConditionType]corev1.ConditionStatus)
for _, cond := range conditionList {
result[cond.Type] = cond.Status
}
if i >= 0 {
c := cr.Status.Conditions[i]
c.LastProbeTime = condition.LastProbeTime
c.LastTransitionTime = condition.LastTransitionTime
if c == condition {
return nil
}
cr.Status.Conditions[i] = condition
} else {
if condition.Status == corev1.ConditionFalse {
// condition starts off as true
return nil
}
cr.Status.Conditions = append(cr.Status.Conditions, condition)
}
return r.crUpdate(cr.Status.Phase, cr)
return result
}
// conditionsChanged reports whether the two condition snapshots differ: a
// condition type was added or removed, or any shared type changed status.
func conditionsChanged(originalValues, newValues map[conditions.ConditionType]corev1.ConditionStatus) bool {
	if len(newValues) != len(originalValues) {
		return true
	}
	for condType, status := range newValues {
		previous, tracked := originalValues[condType]
		if !tracked || previous != status {
			return true
		}
	}
	return false
}
// MarkCrHealthyMessage marks the passed in CR as healthy. The CR object needs
// to be updated by the caller afterwards.
// Healthy means the following status conditions are set:
//   Available:   true (carrying the supplied reason/message)
//   Progressing: false
//   Degraded:    false
func MarkCrHealthyMessage(cr *cdiv1alpha1.CDI, reason, message string) {
	healthy := []conditions.Condition{
		{Type: conditions.ConditionAvailable, Status: corev1.ConditionTrue, Reason: reason, Message: message},
		{Type: conditions.ConditionProgressing, Status: corev1.ConditionFalse},
		{Type: conditions.ConditionDegraded, Status: corev1.ConditionFalse},
	}
	for _, cond := range healthy {
		conditions.SetStatusCondition(&cr.Status.Conditions, cond)
	}
}
// MarkCrUpgradeHealingDegraded marks the passed CR as upgrading and degraded. The CR object needs to be updated by the caller afterwards.
// UpgradeHealingDegraded means the following status conditions are set:
//   ApplicationAvailable: true
//   Progressing: true
//   Degraded: true
func MarkCrUpgradeHealingDegraded(cr *cdiv1alpha1.CDI, reason, message string) {
	// Apply each condition in order; only Degraded carries the reason/message.
	for _, cond := range []conditions.Condition{
		{Type: conditions.ConditionAvailable, Status: corev1.ConditionTrue},
		{Type: conditions.ConditionProgressing, Status: corev1.ConditionTrue},
		{Type: conditions.ConditionDegraded, Status: corev1.ConditionTrue, Reason: reason, Message: message},
	} {
		conditions.SetStatusCondition(&cr.Status.Conditions, cond)
	}
}
// MarkCrFailed marks the passed CR as failed and requiring human intervention. The CR object needs to be updated by the caller afterwards.
// Failed means the following status conditions are set:
//   ApplicationAvailable: false
//   Progressing: false
//   Degraded: true
func MarkCrFailed(cr *cdiv1alpha1.CDI, reason, message string) {
	// Apply each condition in order; only Degraded carries the reason/message.
	for _, cond := range []conditions.Condition{
		{Type: conditions.ConditionAvailable, Status: corev1.ConditionFalse},
		{Type: conditions.ConditionProgressing, Status: corev1.ConditionFalse},
		{Type: conditions.ConditionDegraded, Status: corev1.ConditionTrue, Reason: reason, Message: message},
	} {
		conditions.SetStatusCondition(&cr.Status.Conditions, cond)
	}
}
// MarkCrFailedHealing marks the passed CR as failed and healing. The CR object needs to be updated by the caller afterwards.
// FailedAndHealing means the following status conditions are set:
//   ApplicationAvailable: false
//   Progressing: true
//   Degraded: true
func MarkCrFailedHealing(cr *cdiv1alpha1.CDI, reason, message string) {
	// Apply each condition in order; only Degraded carries the reason/message.
	for _, cond := range []conditions.Condition{
		{Type: conditions.ConditionAvailable, Status: corev1.ConditionFalse},
		{Type: conditions.ConditionProgressing, Status: corev1.ConditionTrue},
		{Type: conditions.ConditionDegraded, Status: corev1.ConditionTrue, Reason: reason, Message: message},
	} {
		conditions.SetStatusCondition(&cr.Status.Conditions, cond)
	}
}
// MarkCrDeploying marks the passed CR as currently deploying. The CR object needs to be updated by the caller afterwards.
// Deploying means the following status conditions are set:
//   ApplicationAvailable: false
//   Progressing: true
//   Degraded: false
func MarkCrDeploying(cr *cdiv1alpha1.CDI, reason, message string) {
	// Apply each condition in order; only Progressing carries the reason/message.
	for _, cond := range []conditions.Condition{
		{Type: conditions.ConditionAvailable, Status: corev1.ConditionFalse},
		{Type: conditions.ConditionProgressing, Status: corev1.ConditionTrue, Reason: reason, Message: message},
		{Type: conditions.ConditionDegraded, Status: corev1.ConditionFalse},
	} {
		conditions.SetStatusCondition(&cr.Status.Conditions, cond)
	}
}

View File

@ -9,10 +9,14 @@ import (
routev1 "github.com/openshift/api/route/v1"
routeclient "github.com/openshift/client-go/route/clientset/versioned"
secclient "github.com/openshift/client-go/security/clientset/versioned"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubevirt.io/containerized-data-importer/pkg/controller"
operatorcontroller "kubevirt.io/containerized-data-importer/pkg/operator/controller"
"kubevirt.io/containerized-data-importer/tests/framework"
conditions "github.com/openshift/custom-resource-status/conditions/v1"
)
var _ = Describe("Operator tests", func() {
@ -46,4 +50,15 @@ var _ = Describe("Operator tests", func() {
cdiSA := fmt.Sprintf("system:serviceaccount:%s:cdi-sa", f.CdiInstallNs)
Expect(scc.Users).Should(ContainElement(cdiSA))
})
// Condition flags can be found here with their meaning https://github.com/kubevirt/hyperconverged-cluster-operator/blob/master/docs/conditions.md
It("Condition flags on CR should be healthy and operating", func() {
cdiObject, err := f.CdiClient.CdiV1alpha1().CDIs().Get("cdi", metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
conditionMap := operatorcontroller.GetConditionValues(cdiObject.Status.Conditions)
// Application should be fully operational and healthy.
Expect(conditionMap[conditions.ConditionAvailable]).To(Equal(corev1.ConditionTrue))
Expect(conditionMap[conditions.ConditionProgressing]).To(Equal(corev1.ConditionFalse))
Expect(conditionMap[conditions.ConditionDegraded]).To(Equal(corev1.ConditionFalse))
})
})

View File

@ -23,5 +23,4 @@ _testmain.go
*.test
*.prof
/.idea
/vendor
.idea/

View File

@ -3,16 +3,8 @@ go:
- 1.x
- tip
go_import_path: gomodules.xyz/jsonpatch
cache:
directories:
- $HOME/.cache/go-build
- $GOPATH/pkg/mod
env:
- GO111MODULE=on
script:
- cd v2
- go test -v
- go test -v

View File

@ -1,39 +0,0 @@
# Change Log
## [v2.0.0](https://github.com/gomodules/jsonpatch/tree/v2.0.0) (2019-06-26)
[Full Changelog](https://github.com/gomodules/jsonpatch/compare/1.0.0...v2.0.0)
**Merged pull requests:**
- Use Major subdirectory structure to maintain dep compatiability [\#20](https://github.com/gomodules/jsonpatch/pull/20) ([tamalsaha](https://github.com/tamalsaha))
- Prepare v2 release [\#19](https://github.com/gomodules/jsonpatch/pull/19) ([tamalsaha](https://github.com/tamalsaha))
- Update go.mod and remove vendor folder [\#18](https://github.com/gomodules/jsonpatch/pull/18) ([tamalsaha](https://github.com/tamalsaha))
- Change package path to gomodules.xyz/jsonpath [\#17](https://github.com/gomodules/jsonpatch/pull/17) ([tamalsaha](https://github.com/tamalsaha))
- \[Emergency\] correct array index in backtrace [\#16](https://github.com/gomodules/jsonpatch/pull/16) ([kdada](https://github.com/kdada))
- Added support for arrays at the root [\#15](https://github.com/gomodules/jsonpatch/pull/15) ([e-nikolov](https://github.com/e-nikolov))
- Fix the example code in readme [\#14](https://github.com/gomodules/jsonpatch/pull/14) ([pytimer](https://github.com/pytimer))
## [1.0.0](https://github.com/gomodules/jsonpatch/tree/1.0.0) (2019-01-08)
**Fixed bugs:**
- Correctly generate patch for nested object [\#8](https://github.com/gomodules/jsonpatch/issues/8)
**Closed issues:**
- Do releases and in SemVer [\#12](https://github.com/gomodules/jsonpatch/issues/12)
- Generated patch incorrect for Array replacement [\#1](https://github.com/gomodules/jsonpatch/issues/1)
**Merged pull requests:**
- Add JsonPatchOperation as type alias for Operation [\#13](https://github.com/gomodules/jsonpatch/pull/13) ([tamalsaha](https://github.com/tamalsaha))
- Migrate to go mod [\#10](https://github.com/gomodules/jsonpatch/pull/10) ([tamalsaha](https://github.com/tamalsaha))
- Add test for nested object [\#9](https://github.com/gomodules/jsonpatch/pull/9) ([tamalsaha](https://github.com/tamalsaha))
- Add test for edit distance computation [\#7](https://github.com/gomodules/jsonpatch/pull/7) ([tamalsaha](https://github.com/tamalsaha))
- Append edit distance operations from end to start [\#6](https://github.com/gomodules/jsonpatch/pull/6) ([tamalsaha](https://github.com/tamalsaha))
- Add travis file [\#4](https://github.com/gomodules/jsonpatch/pull/4) ([tamalsaha](https://github.com/tamalsaha))
- Run go fmt [\#3](https://github.com/gomodules/jsonpatch/pull/3) ([tamalsaha](https://github.com/tamalsaha))
- Fix array comparison [\#2](https://github.com/gomodules/jsonpatch/pull/2) ([tamalsaha](https://github.com/tamalsaha))
\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*

View File

@ -1,20 +1,19 @@
# jsonpatch
[![Build Status](https://travis-ci.org/gomodules/jsonpatch.svg?branch=master)](https://travis-ci.org/gomodules/jsonpatch)
[![Go Report Card](https://goreportcard.com/badge/gomodules.xyz/jsonpatch "Go Report Card")](https://goreportcard.com/report/gomodules.xyz/jsonpatch)
[![GoDoc](https://godoc.org/gomodules.xyz/jsonpatch/v2?status.svg "GoDoc")](https://godoc.org/gomodules.xyz/jsonpatch/v2)
[![Build Status](https://travis-ci.org/appscode/jsonpatch.svg?branch=master)](https://travis-ci.org/appscode/jsonpatch)
[![Go Report Card](https://goreportcard.com/badge/appscode/jsonpatch "Go Report Card")](https://goreportcard.com/report/appscode/jsonpatch)
[![GoDoc](https://godoc.org/github.com/appscode/jsonpatch?status.svg "GoDoc")](https://godoc.org/github.com/appscode/jsonpatch)
As per http://jsonpatch.com JSON Patch is specified in RFC 6902 from the IETF.
JSON Patch allows you to generate JSON that describes changes you want to make to a document, so you don't have to send the whole doc. JSON Patch format is supported by HTTP PATCH method, allowing for standards based partial updates via REST APIs.
## Usage ##
```go
import "gomodules.xyz/jsonpatch/v2"
```console
go get github.com/appscode/jsonpatch
```
I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and generate format like jsonpatch.com specifies. Here's an example of the patch format:
I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and
generate format like jsonpatch.com specifies. Here's an example of the patch format:
```json
[
@ -33,14 +32,14 @@ package main
import (
"fmt"
"gomodules.xyz/jsonpatch/v2"
"github.com/appscode/jsonpatch"
)
var simpleA = `{"a":100, "b":200, "c":"hello"}`
var simpleB = `{"a":100, "b":200, "c":"goodbye"}`
func main() {
patch, e := jsonpatch.CreatePatch([]byte(simpleA), []byte(simpleB))
patch, e := jsonpatch.CreatePatch([]byte(simpleA), []byte(simpleA))
if e != nil {
fmt.Printf("Error creating JSON patch:%v", e)
return

View File

@ -1,9 +1,8 @@
module gomodules.xyz/jsonpatch/v2
go 1.12
module github.com/appscode/jsonpatch
require (
github.com/evanphx/json-patch v4.5.0+incompatible
github.com/pkg/errors v0.8.1 // indirect
github.com/stretchr/testify v1.3.0
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/evanphx/json-patch v4.0.0+incompatible
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

View File

@ -1,11 +1,8 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc=
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

View File

@ -58,8 +58,8 @@ func NewPatch(operation, path string, value interface{}) Operation {
//
// An error will be returned if any of the two documents are invalid.
func CreatePatch(a, b []byte) ([]Operation, error) {
var aI interface{}
var bI interface{}
aI := map[string]interface{}{}
bI := map[string]interface{}{}
err := json.Unmarshal(a, &aI)
if err != nil {
return nil, errBadJSONDoc
@ -68,7 +68,7 @@ func CreatePatch(a, b []byte) ([]Operation, error) {
if err != nil {
return nil, errBadJSONDoc
}
return handleValues(aI, bI, "", []Operation{})
return diff(aI, bI, "", []Operation{})
}
// Returns true if the values matches (must be json types)
@ -326,7 +326,7 @@ func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Ope
return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...)
}
p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{})
p2, _ := handleValues(s[j-1], t[j-1], makePath(p, i-1), []Operation{})
return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...)
}
if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] {

View File

@ -3,8 +3,8 @@ package jsonpatch_test
import (
"testing"
"github.com/appscode/jsonpatch"
"github.com/stretchr/testify/assert"
"gomodules.xyz/jsonpatch/v2"
)
func TestMarshalNullableValue(t *testing.T) {

View File

@ -4,9 +4,9 @@ import (
"encoding/json"
"testing"
"github.com/appscode/jsonpatch"
jp "github.com/evanphx/json-patch"
"github.com/stretchr/testify/assert"
"gomodules.xyz/jsonpatch/v2"
)
var simpleA = `{"a":100, "b":200, "c":"hello"}`
@ -737,70 +737,6 @@ var (
}`
)
var (
oldArray = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"tolerations": [
{
"key": "node.kubernetes.io/key1",
"operator": "Equal",
"value": "value1",
"effect": "NoSchedule"
},
{
"key": "node.kubernetes.io/key2",
"operator": "Equal",
"value": "value2",
"effect": "NoSchedule"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
]
}
}`
newArray = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"tolerations": [
{
"key": "node.kubernetes.io/key2",
"operator": "Equal",
"value": "value2",
"effect": "NoSchedule"
},
{
"key": "node.kubernetes.io/key1",
"operator": "Equal",
"value": "value1",
"effect": "NoSchedule"
}
]
}
}`
)
func TestCreatePatch(t *testing.T) {
cases := []struct {
name string
@ -841,12 +777,7 @@ func TestCreatePatch(t *testing.T) {
{"Kubernetes:Annotations", oldDeployment, newDeployment},
// crd with nested object
{"Nested Member Object", oldNestedObj, newNestedObj},
// array with different order
{"Different Array", oldArray, newArray},
{"Array at root", `[{"asdf":"qwerty"}]`, `[{"asdf":"bla"},{"asdf":"zzz"}]`},
{"Empty array at root", `[]`, `[{"asdf":"bla"},{"asdf":"zzz"}]`},
}
for _, c := range cases {
t.Run(c.name+"[src->dst]", func(t *testing.T) {
check(t, c.src, c.dst)

View File

@ -1,9 +0,0 @@
module gomodules.xyz/jsonpatch/v2
go 1.12
require (
github.com/evanphx/json-patch v4.5.0+incompatible
github.com/pkg/errors v0.8.1 // indirect
github.com/stretchr/testify v1.3.0
)

View File

@ -1,11 +0,0 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

View File

@ -1,336 +0,0 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
)
// errBadJSONDoc is returned whenever either input document fails to unmarshal.
var errBadJSONDoc = fmt.Errorf("invalid JSON Document")

// JsonPatchOperation is kept as an alias for backward compatibility with
// callers that used the older type name.
type JsonPatchOperation = Operation

// Operation is a single RFC 6902 JSON Patch operation ("op"/"path"/"value").
type Operation struct {
	Operation string      `json:"op"`
	Path      string      `json:"path"`
	Value     interface{} `json:"value,omitempty"`
}

// Json renders the operation as a JSON string; marshal errors are ignored.
func (j *Operation) Json() string {
	b, _ := json.Marshal(j)
	return string(b)
}
// MarshalJSON implements json.Marshaler. It always emits "op" and "path",
// and emits "value" when it is non-nil or the operation is "replace"/"add"
// (those two may legitimately carry a JSON null value).
func (j *Operation) MarshalJSON() ([]byte, error) {
	var b bytes.Buffer
	b.WriteString("{")
	b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation))
	b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path))
	// Consider omitting Value for non-nullable operations.
	if j.Value != nil || j.Operation == "replace" || j.Operation == "add" {
		v, err := json.Marshal(j.Value)
		if err != nil {
			return nil, err
		}
		b.WriteString(`,"value":`)
		b.Write(v)
	}
	b.WriteString("}")
	return b.Bytes(), nil
}
// ByPath sorts operations lexicographically by their JSON Pointer path.
type ByPath []Operation

func (a ByPath) Len() int           { return len(a) }
func (a ByPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path }

// NewPatch builds a single patch operation from its three components.
func NewPatch(operation, path string, value interface{}) Operation {
	return Operation{Operation: operation, Path: path, Value: value}
}
// CreatePatch creates a patch as specified in http://jsonpatch.com/
//
// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content.
// The function will return an array of JsonPatchOperations
//
// An error will be returned if any of the two documents are invalid.
func CreatePatch(a, b []byte) ([]Operation, error) {
	var original, modified interface{}
	if err := json.Unmarshal(a, &original); err != nil {
		return nil, errBadJSONDoc
	}
	if err := json.Unmarshal(b, &modified); err != nil {
		return nil, errBadJSONDoc
	}
	// Diff the two decoded values starting from the root pointer.
	return handleValues(original, modified, "", []Operation{})
}
// matchesValue reports deep equality of two already-unmarshalled JSON values.
// The dynamic types must match exactly; maps must agree over the union of
// their keys, and slices must have equal length and pairwise-matching
// elements. Note that a missing map key yields a nil lookup, which never
// matches anything (not even another nil).
func matchesValue(av, bv interface{}) bool {
	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
		return false
	}
	switch a := av.(type) {
	case string:
		return bv.(string) == a
	case float64:
		return bv.(float64) == a
	case bool:
		return bv.(bool) == a
	case map[string]interface{}:
		b := bv.(map[string]interface{})
		// Walk both key sets so a key present on only one side fails the match.
		for key := range a {
			if !matchesValue(a[key], b[key]) {
				return false
			}
		}
		for key := range b {
			if !matchesValue(a[key], b[key]) {
				return false
			}
		}
		return true
	case []interface{}:
		b := bv.([]interface{})
		if len(a) != len(b) {
			return false
		}
		for idx := range a {
			if !matchesValue(a[idx], b[idx]) {
				return false
			}
		}
		return true
	}
	// nil values and any other types never match.
	return false
}
// From http://tools.ietf.org/html/rfc6901#section-4 :
//
// Evaluation of each reference token begins by decoding any escaped
// character sequence. This is performed by first transforming any
// occurrence of the sequence '~1' to '/', and then transforming any
// occurrence of the sequence '~0' to '~'.
// TODO decode support:
// var rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
// rfc6901Encoder escapes JSON Pointer reference tokens per RFC 6901:
// "~" becomes "~0" and "/" becomes "~1".
var rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1")

// makePath appends newPart (a map key or array index) to a JSON Pointer
// path, escaping the new token.
func makePath(path string, newPart interface{}) string {
	token := rfc6901Encoder.Replace(fmt.Sprint(newPart))
	switch {
	case path == "":
		return "/" + token
	case strings.HasSuffix(path, "/"):
		return path + token
	default:
		return path + "/" + token
	}
}
// diff returns the (recursive) difference between a and b as an array of
// JsonPatchOperations, where a is the original object and b the modified one.
func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) {
	// Additions and in-place changes: walk every key of the modified document.
	for key, modVal := range b {
		keyPath := makePath(path, key)
		origVal, exists := a[key]
		if !exists {
			patch = append(patch, NewPatch("add", keyPath, modVal))
			continue
		}
		// Key exists on both sides; recurse into the values.
		var err error
		if patch, err = handleValues(origVal, modVal, keyPath, patch); err != nil {
			return nil, err
		}
	}
	// Removals: keys present in the original but absent from the modified doc.
	for key := range a {
		if _, stillThere := b[key]; !stillThere {
			patch = append(patch, NewPatch("remove", makePath(path, key), nil))
		}
	}
	return patch, nil
}
// handleValues appends to patch the operations needed to turn av into bv at
// pointer path p, dispatching on the dynamic type of the values.
func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) {
	{
		// Resolve type mismatches up front so the switch below can assume
		// both sides share the same dynamic type.
		at := reflect.TypeOf(av)
		bt := reflect.TypeOf(bv)
		if at == nil && bt == nil {
			// do nothing
			return patch, nil
		} else if at == nil && bt != nil {
			return append(patch, NewPatch("add", p, bv)), nil
		} else if at != bt {
			// If types have changed, replace completely (preserves null in destination)
			return append(patch, NewPatch("replace", p, bv)), nil
		}
	}

	var err error
	switch at := av.(type) {
	case map[string]interface{}:
		// Objects: recurse key-by-key.
		bt := bv.(map[string]interface{})
		patch, err = diff(at, bt, p, patch)
		if err != nil {
			return nil, err
		}
	case string, float64, bool:
		// Scalars: replace when the values differ.
		if !matchesValue(av, bv) {
			patch = append(patch, NewPatch("replace", p, bv))
		}
	case []interface{}:
		bt := bv.([]interface{})
		if isSimpleArray(at) && isSimpleArray(bt) {
			// Simple arrays use edit-distance diffing for minimal operations.
			patch = append(patch, compareEditDistance(at, bt, p)...)
		} else {
			// Positional comparison: remove the surplus tail (back to front so
			// indices stay valid), add new trailing entries, then recurse over
			// the shared prefix.
			n := min(len(at), len(bt))
			for i := len(at) - 1; i >= n; i-- {
				patch = append(patch, NewPatch("remove", makePath(p, i), nil))
			}
			for i := n; i < len(bt); i++ {
				patch = append(patch, NewPatch("add", makePath(p, i), bt[i]))
			}
			for i := 0; i < n; i++ {
				var err error
				patch, err = handleValues(at[i], bt[i], makePath(p, i), patch)
				if err != nil {
					return nil, err
				}
			}
		}
	default:
		// Unmarshalled JSON can only produce the types above.
		panic(fmt.Sprintf("Unknown type:%T ", av))
	}
	return patch, nil
}
// isBasicType reports whether a is a scalar JSON value
// (string, number, or bool as decoded by encoding/json).
func isBasicType(a interface{}) bool {
	switch a.(type) {
	case string, float64, bool:
		return true
	}
	return false
}
// isSimpleArray reports whether a slice is "simple" enough for edit-distance
// diffing: every element is a scalar (string/float64/bool), or a map whose
// values are all scalars (or nil).
func isSimpleArray(a []interface{}) bool {
	for i := range a {
		switch a[i].(type) {
		case string, float64, bool:
		default:
			val := reflect.ValueOf(a[i])
			if val.Kind() == reflect.Map {
				for _, k := range val.MapKeys() {
					av := val.MapIndex(k)
					if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface {
						if av.IsNil() {
							// nil map values are tolerated
							continue
						}
						av = av.Elem()
					}
					if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool {
						return false
					}
				}
				// NOTE(review): this returns true after inspecting only the
				// first map-typed element, so any later elements are never
				// checked — confirm this early return is intended.
				return true
			}
			return false
		}
	}
	return true
}
// compareEditDistance diffs two "simple" arrays with the Wagner-Fischer
// minimum-edit-distance algorithm, then converts the optimal edit path into
// patch operations via backtrace.
// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm
// Adapted from https://github.com/texttheater/golang-levenshtein
func compareEditDistance(s, t []interface{}, p string) []Operation {
	rows, cols := len(s), len(t)

	// matrix[i][j] = edit distance between s[:i] and t[:j].
	matrix := make([][]int, rows+1)
	for i := 0; i <= rows; i++ {
		matrix[i] = make([]int, cols+1)
		matrix[i][0] = i // delete all i leading elements
	}
	for j := 0; j <= cols; j++ {
		matrix[0][j] = j // insert all j leading elements
	}

	for i := 1; i <= rows; i++ {
		for j := 1; j <= cols; j++ {
			if reflect.DeepEqual(s[i-1], t[j-1]) {
				matrix[i][j] = matrix[i-1][j-1] // elements match, no edit needed
			} else {
				deletion := matrix[i-1][j] + 1
				insertion := matrix[i][j-1] + 1
				substitution := matrix[i-1][j-1] + 1
				matrix[i][j] = min(substitution, min(insertion, deletion))
			}
		}
	}
	return backtrace(s, t, p, rows, cols, matrix)
}
// min returns the smaller of two ints.
func min(x int, y int) int {
	if x <= y {
		return x
	}
	return y
}
// backtrace walks the Wagner-Fischer matrix from cell (i, j) back toward the
// origin, emitting one patch operation per edit along a minimal edit path.
func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation {
	// Deletion step: cost came from the cell above.
	if i > 0 && matrix[i-1][j]+1 == matrix[i][j] {
		op := NewPatch("remove", makePath(p, i-1), nil)
		return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...)
	}
	// Insertion step: cost came from the cell to the left.
	if j > 0 && matrix[i][j-1]+1 == matrix[i][j] {
		op := NewPatch("add", makePath(p, i), t[j-1])
		return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...)
	}
	// Substitution step: cost came from the diagonal.
	if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] {
		// NOTE(review): this tests s[0], not s[i-1] — presumably it assumes a
		// homogeneous array; confirm this is intended for mixed-type arrays.
		if isBasicType(s[0]) {
			op := NewPatch("replace", makePath(p, i-1), t[j-1])
			return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...)
		}
		// Non-scalar elements: diff the pair recursively instead of replacing.
		p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{})
		return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...)
	}
	// Match: no edit at this position.
	if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] {
		return backtrace(s, t, p, i-1, j-1, matrix)
	}
	return []Operation{}
}

View File

@ -1,33 +0,0 @@
package jsonpatch_test
import (
"testing"
"github.com/stretchr/testify/assert"
"gomodules.xyz/jsonpatch/v2"
)
// TestMarshalNullableValue checks that "replace" operations always emit a
// "value" key, even when the value is nil (JSON null).
func TestMarshalNullableValue(t *testing.T) {
	p1 := jsonpatch.Operation{
		Operation: "replace",
		Path:      "/a1",
		Value:     nil,
	}
	assert.JSONEq(t, `{"op":"replace", "path":"/a1","value":null}`, p1.Json())

	p2 := jsonpatch.Operation{
		Operation: "replace",
		Path:      "/a2",
		Value:     "v2",
	}
	assert.JSONEq(t, `{"op":"replace", "path":"/a2", "value":"v2"}`, p2.Json())
}
// TestMarshalNonNullableValue checks that a "remove" operation with a nil
// value omits the "value" key entirely.
func TestMarshalNonNullableValue(t *testing.T) {
	p1 := jsonpatch.Operation{
		Operation: "remove",
		Path:      "/a1",
	}
	assert.JSONEq(t, `{"op":"remove", "path":"/a1"}`, p1.Json())
}

View File

@ -1,874 +0,0 @@
package jsonpatch_test
import (
"encoding/json"
"testing"
jp "github.com/evanphx/json-patch"
"github.com/stretchr/testify/assert"
"gomodules.xyz/jsonpatch/v2"
)
var simpleA = `{"a":100, "b":200, "c":"hello"}`
var simpleB = `{"a":100, "b":200, "c":"goodbye"}`
var simpleC = `{"a":100, "b":100, "c":"hello"}`
var simpleD = `{"a":100, "b":200, "c":"hello", "d":"foo"}`
var simpleE = `{"a":100, "b":200}`
var simplef = `{"a":100, "b":100, "d":"foo"}`
var simpleG = `{"a":100, "b":null, "d":"foo"}`
var empty = `{}`
var arraySrc = `
{
"spec": {
"loadBalancerSourceRanges": [
"192.101.0.0/16",
"192.0.0.0/24"
]
}
}
`
var arrayDst = `
{
"spec": {
"loadBalancerSourceRanges": [
"192.101.0.0/24"
]
}
}
`
var complexBase = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var complexA = `{"a":100, "b":[{"c1":"goodbye", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var complexB = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":100, "g":"h", "i":"j"}}`
var complexC = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}, "k":[{"l":"m"}, {"l":"o"}]}`
var complexD = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"}, {"c3":"hello3", "d3":"foo3"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var complexE = `{"a":100, "b":[{"c1":"hello", "d1":"foo"},{"c2":"hello2", "d2":"foo2"} ], "e":{"f":200, "g":"h", "i":"j"}}`
var point = `{"type":"Point", "coordinates":[0.0, 1.0]}`
var lineString = `{"type":"LineString", "coordinates":[[0.0, 1.0], [2.0, 3.0]]}`
var hyperComplexBase = `
{
"goods": [
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
]
}`
var hyperComplexA = `
{
"goods": [
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Strawberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Vanilla" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5004", "type": "Maple" }
]
}
]
}`
var superComplexBase = `
{
"annotations": {
"annotation": [
{
"name": "version",
"value": "8"
},
{
"name": "versionTag",
"value": "Published on May 13, 2015 at 8:48pm (MST)"
}
]
},
"attributes": {
"attribute-key": [
{
"id": "3b05c943-d81a-436f-b242-8b519e7a6f30",
"properties": {
"visible": true
}
},
{
"id": "d794c7ee-2a4b-4da4-bba7-e8b973d50c4b",
"properties": {
"visible": true
}
},
{
"id": "a0259458-517c-480f-9f04-9b54b1b2af1f",
"properties": {
"visible": true
}
},
{
"id": "9415f39d-c396-4458-9019-fc076c847964",
"properties": {
"visible": true
}
},
{
"id": "0a2e49a9-8989-42fb-97da-cc66334f828b",
"properties": {
"visible": true
}
},
{
"id": "27f5f14a-ea97-4feb-b22a-6ff754a31212",
"properties": {
"visible": true
}
},
{
"id": "6f810508-4615-4fd0-9e87-80f9c94f9ad8",
"properties": {
"visible": true
}
},
{
"id": "3451b1b2-7365-455c-8bb1-0b464d4d3ba1",
"properties": {
"visible": true
}
},
{
"id": "a82ec957-8c26-41ea-8af6-6dd75c384801",
"properties": {
"visible": true
}
},
{
"id": "736c5496-9a6e-4a82-aa00-456725796432",
"properties": {
"visible": true
}
},
{
"id": "2d428b3c-9d3b-4ec1-bf98-e00673599d60",
"properties": {
"visible": true
}
},
{
"id": "68566ebb-811d-4337-aba9-a8a8baf90e4b",
"properties": {
"visible": true
}
},
{
"id": "ca88bab1-a1ea-40cc-8f96-96d1e9f1217d",
"properties": {
"visible": true
}
},
{
"id": "c63a12c8-542d-47f3-bee1-30b5fe2b0690",
"properties": {
"visible": true
}
},
{
"id": "cbd9e3bc-6a49-432a-a906-b1674c1de24c",
"properties": {
"visible": true
}
},
{
"id": "03262f07-8a15-416d-a3f5-e2bf561c78f9",
"properties": {
"visible": true
}
},
{
"id": "e5c93b87-83fc-45b6-b4d5-bf1e3f523075",
"properties": {
"visible": true
}
},
{
"id": "72260ac5-3d51-49d7-bb31-f794dd129f1c",
"properties": {
"visible": true
}
},
{
"id": "d856bde1-1b42-4935-9bee-c37e886c9ecf",
"properties": {
"visible": true
}
},
{
"id": "62380509-bedf-4134-95c3-77ff377a4a6a",
"properties": {
"visible": true
}
},
{
"id": "f4ed5ac9-b386-49a6-a0a0-6f3341ce9021",
"properties": {
"visible": true
}
},
{
"id": "528d2bd2-87fe-4a49-954a-c93a03256929",
"properties": {
"visible": true
}
},
{
"id": "ff8951f1-61a7-416b-9223-fac4bb6dac50",
"properties": {
"visible": true
}
},
{
"id": "95c2b011-d782-4042-8a07-6aa4a5765c2e",
"properties": {
"visible": true
}
},
{
"id": "dbe5837b-0624-4a05-91f3-67b5bd9b812a",
"properties": {
"visible": true
}
},
{
"id": "13f198ed-82ab-4e51-8144-bfaa5bf77fd5",
"properties": {
"visible": true
}
},
{
"id": "025312eb-12b6-47e6-9750-0fb31ddc2111",
"properties": {
"visible": true
}
},
{
"id": "24292d58-db66-4ef3-8f4f-005d7b719433",
"properties": {
"visible": true
}
},
{
"id": "22e5b5c4-821c-413a-a5b1-ab866d9a03bb",
"properties": {
"visible": true
}
},
{
"id": "2fde0aac-df89-403d-998e-854b949c7b57",
"properties": {
"visible": true
}
},
{
"id": "8b576876-5c16-4178-805e-24984c24fac3",
"properties": {
"visible": true
}
},
{
"id": "415b7d2a-b362-4f1e-b83a-927802328ecb",
"properties": {
"visible": true
}
},
{
"id": "8ef24fc2-ab25-4f22-9d9f-61902b49dc01",
"properties": {
"visible": true
}
},
{
"id": "2299b09e-9f8e-4b79-a55c-a7edacde2c85",
"properties": {
"visible": true
}
},
{
"id": "bf506538-f438-425c-be85-5aa2f9b075b8",
"properties": {
"visible": true
}
},
{
"id": "2b501dc6-799d-4675-9144-fac77c50c57c",
"properties": {
"visible": true
}
},
{
"id": "c0446da1-e069-417e-bd5a-34edcd028edc",
"properties": {
"visible": true
}
}
]
}
}`
var superComplexA = `
{
"annotations": {
"annotation": [
{
"name": "version",
"value": "8"
},
{
"name": "versionTag",
"value": "Published on May 13, 2015 at 8:48pm (MST)"
}
]
},
"attributes": {
"attribute-key": [
{
"id": "3b05c943-d81a-436f-b242-8b519e7a6f30",
"properties": {
"visible": true
}
},
{
"id": "d794c7ee-2a4b-4da4-bba7-e8b973d50c4b",
"properties": {
"visible": true
}
},
{
"id": "a0259458-517c-480f-9f04-9b54b1b2af1f",
"properties": {
"visible": true
}
},
{
"id": "9415f39d-c396-4458-9019-fc076c847964",
"properties": {
"visible": true
}
},
{
"id": "0a2e49a9-8989-42fb-97da-cc66334f828b",
"properties": {
"visible": true
}
},
{
"id": "27f5f14a-ea97-4feb-b22a-6ff754a31212",
"properties": {
"visible": true
}
},
{
"id": "6f810508-4615-4fd0-9e87-80f9c94f9ad8",
"properties": {
"visible": true
}
},
{
"id": "3451b1b2-7365-455c-8bb1-0b464d4d3ba1",
"properties": {
"visible": true
}
},
{
"id": "a82ec957-8c26-41ea-8af6-6dd75c384801",
"properties": {
"visible": true
}
},
{
"id": "736c5496-9a6e-4a82-aa00-456725796432",
"properties": {
"visible": true
}
},
{
"id": "2d428b3c-9d3b-4ec1-bf98-e00673599d60",
"properties": {
"visible": true
}
},
{
"id": "68566ebb-811d-4337-aba9-a8a8baf90e4b",
"properties": {
"visible": true
}
},
{
"id": "ca88bab1-a1ea-40cc-8f96-96d1e9f1217d",
"properties": {
"visible": true
}
},
{
"id": "c63a12c8-542d-47f3-bee1-30b5fe2b0690",
"properties": {
"visible": true
}
},
{
"id": "cbd9e3bc-6a49-432a-a906-b1674c1de24c",
"properties": {
"visible": true
}
},
{
"id": "03262f07-8a15-416d-a3f5-e2bf561c78f9",
"properties": {
"visible": true
}
},
{
"id": "e5c93b87-83fc-45b6-b4d5-bf1e3f523075",
"properties": {
"visible": true
}
},
{
"id": "72260ac5-3d51-49d7-bb31-f794dd129f1c",
"properties": {
"visible": true
}
},
{
"id": "d856bde1-1b42-4935-9bee-c37e886c9ecf",
"properties": {
"visible": true
}
},
{
"id": "62380509-bedf-4134-95c3-77ff377a4a6a",
"properties": {
"visible": true
}
},
{
"id": "f4ed5ac9-b386-49a6-a0a0-6f3341ce9021",
"properties": {
"visible": true
}
},
{
"id": "528d2bd2-87fe-4a49-954a-c93a03256929",
"properties": {
"visible": true
}
},
{
"id": "ff8951f1-61a7-416b-9223-fac4bb6dac50",
"properties": {
"visible": true
}
},
{
"id": "95c2b011-d782-4042-8a07-6aa4a5765c2e",
"properties": {
"visible": true
}
},
{
"id": "dbe5837b-0624-4a05-91f3-67b5bd9b812a",
"properties": {
"visible": true
}
},
{
"id": "13f198ed-82ab-4e51-8144-bfaa5bf77fd5",
"properties": {
"visible": true
}
},
{
"id": "025312eb-12b6-47e6-9750-0fb31ddc2111",
"properties": {
"visible": true
}
},
{
"id": "24292d58-db66-4ef3-8f4f-005d7b719433",
"properties": {
"visible": true
}
},
{
"id": "22e5b5c4-821c-413a-a5b1-ab866d9a03bb",
"properties": {
"visible": true
}
},
{
"id": "2fde0aac-df89-403d-998e-854b949c7b57",
"properties": {
"visible": true
}
},
{
"id": "8b576876-5c16-4178-805e-24984c24fac3",
"properties": {
"visible": true
}
},
{
"id": "415b7d2a-b362-4f1e-b83a-927802328ecb",
"properties": {
"visible": true
}
},
{
"id": "8ef24fc2-ab25-4f22-9d9f-61902b49dc01",
"properties": {
"visible": true
}
},
{
"id": "2299b09e-9f8e-4b79-a55c-a7edacde2c85",
"properties": {
"visible": true
}
},
{
"id": "bf506538-f438-425c-be85-5aa2f9b075b8",
"properties": {
"visible": true
}
},
{
"id": "2b501dc6-799d-4675-9144-fac77c50c57c",
"properties": {
"visible": true
}
},
{
"id": "c0446da1-e069-417e-bd5a-34edcd028edc",
"properties": {
"visible": false
}
}
]
}
}`
// Kubernetes Deployment fixtures for the map (object-key) diff case:
// newDeployment differs from oldDeployment only by one added annotation,
// exercising patch generation for key addition/removal inside a nested map.
var (
oldDeployment = `{
"apiVersion": "apps/v1beta1",
"kind": "Deployment",
"metadata": {
"annotations": {
"k8s.io/app": "busy-dep"
}
}
}`
newDeployment = `{
"apiVersion": "apps/v1beta1",
"kind": "Deployment",
"metadata": {
"annotations": {
"k8s.io/app": "busy-dep",
"docker.com/commit": "github.com/myrepo#xyz"
}
}
}`
)
// CRD fixtures for the nested-object diff case: newNestedObj extends the
// Elasticsearch spec with extra scalar fields and one nested object
// (updateStrategy), exercising member addition at multiple depths.
var (
oldNestedObj = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"doNotPause": true,
"version": "5.6"
}
}`
newNestedObj = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"doNotPause": true,
"version": "5.6",
"storageType": "Durable",
"updateStrategy": {
"type": "RollingUpdate"
},
"terminationPolicy": "Pause"
}
}`
)
// Array-reordering fixtures: newArray keeps two of oldArray's tolerations but
// in swapped order and drops the other two, exercising diffing of arrays whose
// elements moved and were removed rather than edited in place.
var (
oldArray = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"tolerations": [
{
"key": "node.kubernetes.io/key1",
"operator": "Equal",
"value": "value1",
"effect": "NoSchedule"
},
{
"key": "node.kubernetes.io/key2",
"operator": "Equal",
"value": "value2",
"effect": "NoSchedule"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
]
}
}`
newArray = `{
"apiVersion": "kubedb.com/v1alpha1",
"kind": "Elasticsearch",
"metadata": {
"name": "quick-elasticsearch",
"namespace": "demo"
},
"spec": {
"tolerations": [
{
"key": "node.kubernetes.io/key2",
"operator": "Equal",
"value": "value2",
"effect": "NoSchedule"
},
{
"key": "node.kubernetes.io/key1",
"operator": "Equal",
"value": "value1",
"effect": "NoSchedule"
}
]
}
}`
)
// TestCreatePatch drives CreatePatch over every fixture pair in the file and
// verifies, via check, that the generated patch transforms one document into
// the other. Each pair is exercised in both directions (src->dst and
// dst->src) as separate subtests.
func TestCreatePatch(t *testing.T) {
	type testCase struct {
		name string
		src  string
		dst  string
	}
	testCases := []testCase{
		// simple
		{"Simple:OneNullReplace", simplef, simpleG},
		{"Simple:Same", simpleA, simpleA},
		{"Simple:OneStringReplace", simpleA, simpleB},
		{"Simple:OneIntReplace", simpleA, simpleC},
		{"Simple:OneAdd", simpleA, simpleD},
		{"Simple:OneRemove", simpleA, simpleE},
		{"Simple:VsEmpty", simpleA, empty},
		// array types
		{"Array:Same", arraySrc, arraySrc},
		{"Array:BoolReplace", arraySrc, arrayDst},
		{"Array:AlmostSame", `{"Lines":[1,2,3,4,5,6,7,8,9,10]}`, `{"Lines":[2,3,4,5,6,7,8,9,10,11]}`},
		{"Array:Remove", `{"x":["A", "B", "C"]}`, `{"x":["D"]}`},
		{"Array:EditDistance", `{"letters":["A","B","C","D","E","F","G","H","I","J","K"]}`, `{"letters":["L","M","N"]}`},
		// complex types
		{"Complex:Same", complexBase, complexBase},
		{"Complex:OneStringReplaceInArray", complexBase, complexA},
		{"Complex:OneIntReplace", complexBase, complexB},
		{"Complex:OneAdd", complexBase, complexC},
		{"Complex:OneAddToArray", complexBase, complexC},
		{"Complex:VsEmpty", complexBase, empty},
		// geojson
		{"GeoJson:PointLineStringReplace", point, lineString},
		{"GeoJson:LineStringPointReplace", lineString, point},
		// HyperComplex
		{"HyperComplex:Same", hyperComplexBase, hyperComplexBase},
		{"HyperComplex:BoolReplace", hyperComplexBase, hyperComplexA},
		// SuperComplex
		{"SuperComplex:Same", superComplexBase, superComplexBase},
		{"SuperComplex:BoolReplace", superComplexBase, superComplexA},
		// map
		{"Kubernetes:Annotations", oldDeployment, newDeployment},
		// crd with nested object
		{"Nested Member Object", oldNestedObj, newNestedObj},
		// array with different order
		{"Different Array", oldArray, newArray},
		{"Array at root", `[{"asdf":"qwerty"}]`, `[{"asdf":"bla"},{"asdf":"zzz"}]`},
		{"Empty array at root", `[]`, `[{"asdf":"bla"},{"asdf":"zzz"}]`},
	}
	for _, tc := range testCases {
		tc := tc // pin the loop variable for the subtest closures
		t.Run(tc.name+"[src->dst]", func(t *testing.T) {
			check(t, tc.src, tc.dst)
		})
		t.Run(tc.name+"[dst->src]", func(t *testing.T) {
			check(t, tc.dst, tc.src)
		})
	}
}
// check generates a JSON patch from src to dst, round-trips the patch through
// JSON encoding, applies it back onto src with an independent patch library,
// and asserts the result is JSON-equal to dst.
func check(t *testing.T, src, dst string) {
	patch, err := jsonpatch.CreatePatch([]byte(src), []byte(dst))
	assert.Nil(t, err)

	encoded, err := json.Marshal(patch)
	assert.Nil(t, err)

	decoded, err := jp.DecodePatch(encoded)
	assert.Nil(t, err)

	patched, err := decoded.Apply([]byte(src))
	assert.Nil(t, err)

	assert.JSONEq(t, dst, string(patched))
}

View File

@ -9,6 +9,7 @@ go:
- 1.11.x
- 1.12.x
install: skip
script:
- go get golang.org/x/tools/cmd/cover
- go get github.com/smartystreets/goconvey

View File

@ -22,18 +22,10 @@ Package ini provides INI file read and write functionality in Go.
The minimum requirement of Go is **1.6**.
To use a tagged revision:
```sh
$ go get gopkg.in/ini.v1
```
To use with latest changes:
```sh
$ go get github.com/go-ini/ini
```
Please add `-u` flag to update in the future.
## Getting Help

74
vendor/github.com/go-ini/ini/data_source.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
)
var (
_ dataSource = (*sourceFile)(nil)
_ dataSource = (*sourceData)(nil)
_ dataSource = (*sourceReadCloser)(nil)
)
// dataSource is an interface that returns object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}
// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
reader io.ReadCloser
}
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
return s.reader, nil
}
// parseDataSource wraps a supported source value in the matching dataSource
// implementation: a string is treated as a file name, a []byte as in-memory
// content, and an io.ReadCloser as an already-open stream. Any other type
// yields an error.
func parseDataSource(source interface{}) (dataSource, error) {
	if name, ok := source.(string); ok {
		return sourceFile{name: name}, nil
	}
	if raw, ok := source.([]byte); ok {
		return &sourceData{data: raw}, nil
	}
	if rc, ok := source.(io.ReadCloser); ok {
		return &sourceReadCloser{reader: rc}, nil
	}
	return nil, fmt.Errorf("error parsing data source: unknown type %q", source)
}

25
vendor/github.com/go-ini/ini/deprecated.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
const (
// Deprecated: Use "DefaultSection" instead.
DEFAULT_SECTION = DefaultSection
)
var (
// Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore = SnackCase
)

View File

@ -302,7 +302,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
KEY_LIST:
KeyList:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
@ -347,7 +347,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
if kname != sec.keyList[len(sec.keyList)-1] {
buf.WriteString(LineBreak)
}
continue KEY_LIST
continue KeyList
}
// Write out alignment spaces before "=" sign

24
vendor/github.com/go-ini/ini/helper.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
// inSlice reports whether str is present in s.
func inSlice(str string, s []string) bool {
	for i := range s {
		if s[i] == str {
			return true
		}
	}
	return false
}

29
vendor/github.com/go-ini/ini/helper_test.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// Test_isSlice verifies inSlice for both a present and an absent element.
func Test_isSlice(t *testing.T) {
Convey("Check if a string is in the slice", t, func() {
ss := []string{"a", "b", "c"}
So(inSlice("a", ss), ShouldBeTrue)
So(inSlice("d", ss), ShouldBeFalse)
})
}

86
vendor/github.com/go-ini/ini/ini.go generated vendored
View File

@ -18,11 +18,6 @@
package ini
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"runtime"
)
@ -31,12 +26,10 @@ const (
// DefaultSection is the name of default section. You can use this constant or the string literal.
// In most of cases, an empty string is all you need to access the section.
DefaultSection = "DEFAULT"
// Deprecated: Use "DefaultSection" instead.
DEFAULT_SECTION = DefaultSection
// Maximum allowed depth when recursively substituing variable names.
depthValues = 99
version = "1.42.1"
version = "1.46.0"
)
// Version returns current package version literal.
@ -49,26 +42,23 @@ var (
// This variable will be changed to "\r\n" automatically on Windows at package init time.
LineBreak = "\n"
// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatLeft = ""
// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatRight = ""
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
PrettyEqual = false
varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
// DefaultHeader explicitly writes default section header.
DefaultHeader = false
// PrettySection indicates whether to put a line between sections.
PrettySection = true
// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
PrettyEqual = false
// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatLeft = ""
// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatRight = ""
)
func init() {
@ -77,60 +67,6 @@ func init() {
}
}
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}
// dataSource is an interface that returns object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}
// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
reader io.ReadCloser
}
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
return s.reader, nil
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
}
}
// LoadOptions contains all customized options used for load data source(s).
type LoadOptions struct {
// Loose indicates whether the parser should ignore nonexistent files or return error.

View File

@ -25,11 +25,3 @@ func Test_Version(t *testing.T) {
So(Version(), ShouldEqual, version)
})
}
func Test_isSlice(t *testing.T) {
Convey("Check if a string is in the slice", t, func() {
ss := []string{"a", "b", "c"}
So(inSlice("a", ss), ShouldBeTrue)
So(inSlice("d", ss), ShouldBeFalse)
})
}

View File

@ -266,6 +266,19 @@ e-mail = u@gogs.io
So(f.Section("Author").Key("e-mail").String(), ShouldBeEmpty)
})
})
// Ref: https://github.com/go-ini/ini/issues/198
Convey("Insensitive load with default section", t, func() {
f, err := ini.InsensitiveLoad([]byte(`
user = unknwon
[profile]
email = unknwon@local
`))
So(err, ShouldBeNil)
So(f, ShouldNotBeNil)
So(f.Section(ini.DefaultSection).Key("user").String(), ShouldEqual, "unknwon")
})
}
func TestLoadSources(t *testing.T) {
@ -507,11 +520,16 @@ long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
foobar
barfoo
-----END RSA PRIVATE KEY-----
multiline_list =
first
second
third
`))
So(err, ShouldBeNil)
So(f, ShouldNotBeNil)
So(f.Section("long").Key("long_rsa_private_key").String(), ShouldEqual, "-----BEGIN RSA PRIVATE KEY-----\nfoo\nbar\nfoobar\nbarfoo\n-----END RSA PRIVATE KEY-----")
So(f.Section("long").Key("multiline_list").String(), ShouldEqual, "\nfirst\nsecond\nthird")
})
Convey("Can parse big python-compatible INI files", func() {
@ -817,18 +835,26 @@ GITHUB = U;n;k;n;w;o;n
Convey("with false `AllowPythonMultilineValues`", func() {
Convey("Ignore nonexistent files", func() {
f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false, Loose: true}, notFoundConf, minimalConf)
f, err := ini.LoadSources(ini.LoadOptions{
AllowPythonMultilineValues: false,
Loose: true,
}, notFoundConf, minimalConf)
So(err, ShouldBeNil)
So(f, ShouldNotBeNil)
Convey("Inverse case", func() {
_, err = ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, notFoundConf)
_, err = ini.LoadSources(ini.LoadOptions{
AllowPythonMultilineValues: false,
}, notFoundConf)
So(err, ShouldNotBeNil)
})
})
Convey("Insensitive to section and key names", func() {
f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false, Insensitive: true}, minimalConf)
f, err := ini.LoadSources(ini.LoadOptions{
AllowPythonMultilineValues: false,
Insensitive: true,
}, minimalConf)
So(err, ShouldBeNil)
So(f, ShouldNotBeNil)
@ -845,7 +871,9 @@ e-mail = u@gogs.io
})
Convey("Inverse case", func() {
f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, minimalConf)
f, err := ini.LoadSources(ini.LoadOptions{
AllowPythonMultilineValues: false,
}, minimalConf)
So(err, ShouldBeNil)
So(f, ShouldNotBeNil)

View File

@ -204,6 +204,9 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return "", nil
}
@ -272,44 +275,46 @@ func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
line = strings.Replace(line, `\#`, "#", -1)
}
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
val := line
for {
peekData, peekErr := peekBuffer.ReadBytes('\n')
if peekErr != nil {
if peekErr == io.EOF {
return val, nil
}
return "", peekErr
}
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
if len(peekMatches) != 3 {
return val, nil
}
// NOTE: Return if not a python-ini multi-line value.
currentIdentSize := len(peekMatches[1])
if currentIdentSize <= 0 {
return val, nil
}
// NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
_, err := p.readUntil('\n')
if err != nil {
return "", err
}
val += fmt.Sprintf("\n%s", peekMatches[2])
}
return p.readPythonMultilines(line, bufferSize)
}
return line, nil
}
// readPythonMultilines appends python-style continuation lines to line.
// It peeks ahead in the parser buffer without consuming it, and for every
// upcoming line that matches pythonMultiline with a positive indent it
// appends the captured value and advances the real reader to stay in sync
// with the peek buffer. It returns as soon as a non-matching or unindented
// line (or EOF) is seen.
// NOTE(review): assumes pythonMultiline captures (indent, value) as groups
// 1 and 2 — confirm against the regexp's definition elsewhere in the package.
func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
// Peek is non-consuming: errors are ignored and a short read simply limits
// how far ahead we can look.
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
for {
peekData, peekErr := peekBuffer.ReadBytes('\n')
if peekErr != nil {
if peekErr == io.EOF {
// Peek window exhausted: no more continuation lines visible.
return line, nil
}
return "", peekErr
}
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
if len(peekMatches) != 3 {
return line, nil
}
// NOTE: Return if not a python-ini multi-line value.
currentIdentSize := len(peekMatches[1])
if currentIdentSize <= 0 {
return line, nil
}
// NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
_, err := p.readUntil('\n')
if err != nil {
return "", err
}
// Continuation found: join with a newline, keeping only the value part.
line += fmt.Sprintf("\n%s", peekMatches[2])
}
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader, parserOptions{

View File

@ -29,8 +29,8 @@ type NameMapper func(string) string
// Built-in name getters.
var (
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore NameMapper = func(raw string) string {
// SnackCase converts to format SNACK_CASE.
SnackCase NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
@ -50,7 +50,7 @@ var (
if i > 0 {
newstr = append(newstr, '_')
}
chr -= ('A' - 'a')
chr -= 'A' - 'a'
}
newstr = append(newstr, chr)
}
@ -149,7 +149,7 @@ func wrapStrictError(err error, isStrict bool) error {
// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use default value that is already assigned to strcut.
// because we want to use default value that is already assigned to struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
switch t.Kind() {
case reflect.String:
@ -205,6 +205,17 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
case reflect.Ptr:
switch t.Elem().Kind() {
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return wrapStrictError(err, isStrict)
}
field.Set(reflect.ValueOf(&boolVal))
default:
return fmt.Errorf("unsupported type '%s'", t)
}
default:
return fmt.Errorf("unsupported type '%s'", t)
}
@ -244,14 +255,21 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
continue
}
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
isStruct := tpField.Type.Kind() == reflect.Struct
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
if isAnonymous {
field.Set(reflect.New(tpField.Type.Elem()))
}
if isAnonymous || isStruct {
if isAnonymous || isStruct || isStructPtr {
if sec, err := s.f.GetSection(fieldName); err == nil {
// Only set the field to non-nil struct value if we have
// a section for it. Otherwise, we end up with a non-nil
// struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
if err = sec.mapTo(field, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
@ -342,14 +360,43 @@ func StrictMapTo(v, source interface{}, others ...interface{}) error {
}
// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()
if allowShadow {
var keyWithShadows *Key
for i := 0; i < field.Len(); i++ {
var val string
switch sliceOf {
case reflect.String:
val = slice.Index(i).String()
case reflect.Int, reflect.Int64:
val = fmt.Sprint(slice.Index(i).Int())
case reflect.Uint, reflect.Uint64:
val = fmt.Sprint(slice.Index(i).Uint())
case reflect.Float64:
val = fmt.Sprint(slice.Index(i).Float())
case reflectTime:
val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
if i == 0 {
keyWithShadows = newKey(key.s, key.name, val)
} else {
keyWithShadows.AddShadow(val)
}
}
key = keyWithShadows
return nil
}
var buf bytes.Buffer
sliceOf := field.Type().Elem().Kind()
for i := 0; i < field.Len(); i++ {
switch sliceOf {
case reflect.String:
@ -367,12 +414,12 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) err
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-1])
key.SetValue(buf.String()[:buf.Len()-len(delim)])
return nil
}
// reflectWithProperType does the opposite thing as setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
@ -387,7 +434,11 @@ func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim
case reflectTime:
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
case reflect.Slice:
return reflectSliceWithProperType(key, field, delim)
return reflectSliceWithProperType(key, field, delim, allowShadow)
case reflect.Ptr:
if !field.IsNil() {
return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
}
default:
return fmt.Errorf("unsupported type '%s'", t)
}
@ -432,12 +483,12 @@ func (s *Section) reflectFrom(val reflect.Value) error {
continue
}
opts := strings.SplitN(tag, ",", 2)
if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
rawName, omitEmpty, allowShadow := parseTagOptions(tag)
if omitEmpty && isEmptyValue(field) {
continue
}
fieldName := s.parseFieldName(tpField.Name, opts[0])
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
@ -473,7 +524,7 @@ func (s *Section) reflectFrom(val reflect.Value) error {
key.Comment = tpField.Tag.Get("comment")
}
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim")), allowShadow); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}

View File

@ -45,10 +45,13 @@ type testStruct struct {
Name string `ini:"NAME"`
Age int
Male bool
Optional *bool
Money float64
Born time.Time
Time time.Duration `ini:"Duration"`
Others testNested
OthersPtr *testNested
NilPtr *testNested
*TestEmbeded `ini:"grade"`
Unused int `ini:"-"`
Unsigned uint
@ -61,6 +64,7 @@ const _CONF_DATA_STRUCT = `
NAME = Unknwon
Age = 21
Male = true
Optional = true
Money = 1.25
Born = 1993-10-07T20:17:05Z
Duration = 2h45m
@ -79,6 +83,16 @@ Populations = 12345678,98765432
Coordinates = 192.168,10.11
Note = Hello world!
[OthersPtr]
Cities = HangZhou|Boston
Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z
Years = 1993,1994
Numbers = 10010,10086
Ages = 18,19
Populations = 12345678,98765432
Coordinates = 192.168,10.11
Note = Hello world!
[grade]
GPA = 2.8
@ -106,12 +120,13 @@ type unsupport4 struct {
}
type defaultValue struct {
Name string
Age int
Male bool
Money float64
Born time.Time
Cities []string
Name string
Age int
Male bool
Optional *bool
Money float64
Born time.Time
Cities []string
}
type fooBar struct {
@ -136,6 +151,7 @@ func Test_MapToStruct(t *testing.T) {
So(ts.Name, ShouldEqual, "Unknwon")
So(ts.Age, ShouldEqual, 21)
So(ts.Male, ShouldBeTrue)
So(*ts.Optional, ShouldBeTrue)
So(ts.Money, ShouldEqual, 1.25)
So(ts.Unsigned, ShouldEqual, 3)
@ -156,6 +172,17 @@ func Test_MapToStruct(t *testing.T) {
So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]")
So(ts.Others.Note, ShouldEqual, "Hello world!")
So(ts.TestEmbeded.GPA, ShouldEqual, 2.8)
So(strings.Join(ts.OthersPtr.Cities, ","), ShouldEqual, "HangZhou,Boston")
So(ts.OthersPtr.Visits[0].String(), ShouldEqual, t.String())
So(fmt.Sprint(ts.OthersPtr.Years), ShouldEqual, "[1993 1994]")
So(fmt.Sprint(ts.OthersPtr.Numbers), ShouldEqual, "[10010 10086]")
So(fmt.Sprint(ts.OthersPtr.Ages), ShouldEqual, "[18 19]")
So(fmt.Sprint(ts.OthersPtr.Populations), ShouldEqual, "[12345678 98765432]")
So(fmt.Sprint(ts.OthersPtr.Coordinates), ShouldEqual, "[192.168 10.11]")
So(ts.OthersPtr.Note, ShouldEqual, "Hello world!")
So(ts.NilPtr, ShouldBeNil)
})
Convey("Map section to struct", func() {
@ -219,7 +246,7 @@ func Test_MapToStruct(t *testing.T) {
t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
So(err, ShouldBeNil)
dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}}
dv := &defaultValue{"Joe", 10, true, nil, 1.25, t, []string{"HangZhou", "Boston"}}
So(f.MapTo(dv), ShouldBeNil)
So(dv.Name, ShouldEqual, "Joe")
So(dv.Age, ShouldEqual, 10)
@ -275,6 +302,7 @@ func Test_ReflectFromStruct(t *testing.T) {
type Author struct {
Name string `ini:"NAME"`
Male bool
Optional *bool
Age int `comment:"Author's age"`
Height uint
GPA float64
@ -285,7 +313,7 @@ func Test_ReflectFromStruct(t *testing.T) {
t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
So(err, ShouldBeNil)
a := &Author{"Unknwon", true, 21, 100, 2.8, t, "",
a := &Author{"Unknwon", true, nil, 21, 100, 2.8, t, "",
&Embeded{
[]time.Time{t, t},
[]string{"HangZhou", "Boston"},
@ -302,13 +330,14 @@ func Test_ReflectFromStruct(t *testing.T) {
var buf bytes.Buffer
_, err = cfg.WriteTo(&buf)
So(err, ShouldBeNil)
So(buf.String(), ShouldEqual, `NAME = Unknwon
Male = true
So(buf.String(), ShouldEqual, `NAME = Unknwon
Male = true
Optional =
; Author's age
Age = 21
Height = 100
GPA = 2.8
Date = 1993-10-07T20:17:05Z
Age = 21
Height = 100
GPA = 2.8
Date = 1993-10-07T20:17:05Z
; Embeded section
[infos]
@ -352,6 +381,40 @@ omitempty = 9
})
}
// Inspired by https://github.com/go-ini/ini/issues/196
func TestMapToAndReflectFromStructWithShadows(t *testing.T) {
Convey("Map to struct and then reflect with shadows should generate original config content", t, func() {
type include struct {
Paths []string `ini:"path,omitempty,allowshadow"`
}
cfg, err := ini.LoadSources(ini.LoadOptions{
AllowShadows: true,
}, []byte(`
[include]
path = /tmp/gpm-profiles/test5.profile
path = /tmp/gpm-profiles/test1.profile`))
So(err, ShouldBeNil)
sec := cfg.Section("include")
inc := new(include)
err = sec.MapTo(inc)
So(err, ShouldBeNil)
err = sec.ReflectFrom(inc)
So(err, ShouldBeNil)
var buf bytes.Buffer
_, err = cfg.WriteTo(&buf)
So(err, ShouldBeNil)
So(buf.String(), ShouldEqual, `[include]
path = /tmp/gpm-profiles/test5.profile
path = /tmp/gpm-profiles/test1.profile
`)
})
}
type testMapper struct {
PackageName string
}

View File

@ -1,9 +1,9 @@
# A more minimal logging API for Go
Before you consider this package, please read [this blog post by the inimitable
Dave Cheney](http://dave.cheney.net/2015/11/05/lets-talk-about-logging). I
really appreciate what he has to say, and it largely aligns with my own
experiences. Too many choices of levels means inconsistent logs.
Before you consider this package, please read [this blog post by the
inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what
he has to say, and it largely aligns with my own experiences. Too many
choices of levels means inconsistent logs.
This package offers a purely abstract interface, based on these ideas but with
a few twists. Code can depend on just this interface and have the actual
@ -31,6 +31,150 @@ may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.
This is a BETA grade API. I have implemented it for
[glog](https://godoc.org/github.com/golang/glog). Until there is a significant
2nd implementation, I don't really know how it will change.
This is a BETA grade API.
There are implementations for the following logging libraries:
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger):
[stdr](https://github.com/go-logr/stdr)
# FAQ
## Conceptual
## Why structured logging?
- **Structured logs are more easily queryable**: Since you've got
  key-value pairs, it's much easier to query your structured logs for
  particular values by filtering on the contents of a particular key --
  think searching request logs for error codes, Kubernetes reconcilers for
  the name and namespace of the reconciled object, etc.
- **Structured logging makes it easier to have cross-referencable logs**:
Similarly to searchability, if you maintain conventions around your
keys, it becomes easy to gather all log lines related to a particular
concept.
- **Structured logs allow better dimensions of filtering**: if you have
structure to your logs, you've got more precise control over how much
information is logged -- you might choose in a particular configuration
to log certain keys but not others, only log lines where a certain key
matches a certain value, etc, instead of just having v-levels and names
to key off of.
- **Structured logs better represent structured data**: sometimes, the
  data that you want to log is inherently structured (think tuple-like
  objects). Structured logs allow you to preserve that structure when
  outputting.
## Why V-levels?
**V-levels give operators an easy way to control the chattiness of log
operations**. V-levels provide a way for a given package to distinguish
the relative importance or verbosity of a given log message. Then, if
a particular logger or package is logging too many messages, the user
of the package can simply change the v-levels for that library.
## Why not more named levels, like Warning?
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
from Dave's ideas](#differences-from-daves-ideas).
## Why not allow format strings, too?
**Format strings negate many of the benefits of structured logs**:
- They're not easily searchable without resorting to fuzzy searching,
regular expressions, etc
- They don't store structured data well, since contents are flattened into
a string
- They're not cross-referencable
- They don't compress easily, since the message is not constant
(unless you turn positional parameters into key-value pairs with numerical
keys, at which point you've gotten key-value logging with meaningless
keys)
## Practical
## Why key-value pairs, and not a map?
Key-value pairs are *much* easier to optimize, especially around
allocations. Zap (a structured logger that inspired logr's interface) has
[performance measurements](https://github.com/uber-go/zap#performance)
that show this quite nicely.
While the interface ends up being a little less obvious, you get
potentially better performance, plus avoid making users type
`map[string]string{}` every time they want to log.
## What if my V-levels differ between libraries?
That's fine. Control your V-levels on a per-logger basis, and use the
`WithName` function to pass different loggers to different libraries.
Generally, you should take care to ensure that you have relatively
consistent V-levels within a given logger, however, as this makes deciding
on what verbosity of logs to request easier.
## But I *really* want to use a format string!
That's not actually a question. Assuming your question is "how do
I convert my mental model of logging with format strings to logging with
constant messages":
1. figure out what the error actually is, as you'd write in a TL;DR style,
and use that as a message
2. For every place you'd write a format specifier, look to the word before
it, and add that as a key value pair
For instance, consider the following examples (all taken from spots in the
Kubernetes codebase):
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
  responseCode, err)` becomes `logger.V(4).Error(error, "client returned an
  error", "code", responseCode)`
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
response when requesting url", "attempt", retries, "after
seconds", seconds, "url", url)`
If you *really* must use a format string, place it as a key value, and
call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to
reflect over type %T")` becomes `logger.Info("unable to reflect over
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
this is necessary should be few and far between.
## How do I choose my V-levels?
This is basically the only hard constraint: increase V-levels to denote
more verbose or more debug-y logs.
Otherwise, you can start out with `0` as "you always want to see this",
`1` as "common logging that you might *possibly* want to turn off", and
`10` as "I would like to performance-test your log collection stack".
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs).
## How do I choose my keys
- make your keys human-readable
- constant keys are generally a good idea
- be consistent across your codebase
- keys should naturally match parts of the message string
While key names are mostly unrestricted (and spaces are acceptable),
it's generally a good idea to stick to printable ascii characters, or at
least match the general character set of your log lines.
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging

View File

@ -13,22 +13,22 @@
//
// Usage
//
// Logging is done using a Logger. Loggers can have name prefixes and named values
// attached, so that all log messages logged with that Logger have some base context
// associated.
// Logging is done using a Logger. Loggers can have name prefixes and named
// values attached, so that all log messages logged with that Logger have some
// base context associated.
//
// The term "key" is used to refer to the name associated with a particular value, to
// disambiguate it from the general Logger name.
// The term "key" is used to refer to the name associated with a particular
// value, to disambiguate it from the general Logger name.
//
// For instance, suppose we're trying to reconcile the state of an object, and we want
// to log that we've made some decision.
// For instance, suppose we're trying to reconcile the state of an object, and
// we want to log that we've made some decision.
//
// With the traditional log package, we might write
// With the traditional log package, we might write:
// log.Printf(
// "decided to set field foo to value %q for object %s/%s",
// targetValue, object.Namespace, object.Name)
//
// With logr's structured logging, we'd write
// With logr's structured logging, we'd write:
// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
// // and the named value target-type=Foo, for extra context.
// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
@ -36,64 +36,83 @@
// // later on...
// log.Info("setting field foo on object", "value", targetValue, "object", object)
//
// Depending on our logging implementation, we could then make logging decisions based on field values
// (like only logging such events for objects in a certain namespace), or copy the structured
// information into a structured log store.
// Depending on our logging implementation, we could then make logging decisions
// based on field values (like only logging such events for objects in a certain
// namespace), or copy the structured information into a structured log store.
//
// For logging errors, Logger has a method called Error. Suppose we wanted to log an
// error while reconciling. With the traditional log package, we might write
// For logging errors, Logger has a method called Error. Suppose we wanted to
// log an error while reconciling. With the traditional log package, we might
// write:
// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
//
// With logr, we'd instead write
// With logr, we'd instead write:
// // assuming the above setup for log
// log.Error(err, "unable to reconcile object", "object", object)
//
// This functions similarly to:
// log.Info("unable to reconcile object", "error", err, "object", object)
//
// However, it ensures that a standard key for the error value ("error") is used across all
// error logging. Furthermore, certain implementations may choose to attach additional
// information (such as stack traces) on calls to Error, so it's preferred to use Error
// to log errors.
// However, it ensures that a standard key for the error value ("error") is used
// across all error logging. Furthermore, certain implementations may choose to
// attach additional information (such as stack traces) on calls to Error, so
// it's preferred to use Error to log errors.
//
// Parts of a log line
//
// Each log message from a Logger has four types of context:
// logger name, log verbosity, log message, and the named values.
//
// The Logger name constists of a series of name "segments" added by successive calls to WithName.
// These name segments will be joined in some way by the underlying implementation. It is strongly
// reccomended that name segements contain simple identifiers (letters, digits, and hyphen), and do
// not contain characters that could muddle the log output or confuse the joining operation (e.g.
// whitespace, commas, periods, slashes, brackets, quotes, etc).
// The Logger name consists of a series of name "segments" added by successive
// calls to WithName. These name segments will be joined in some way by the
// underlying implementation. It is strongly recommended that name segments
// contain simple identifiers (letters, digits, and hyphen), and do not contain
// characters that could muddle the log output or confuse the joining operation
// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
//
// Log verbosity represents how little a log matters. Level zero, the default, matters most.
// Increasing levels matter less and less. Try to avoid lots of different verbosity levels,
// and instead provide useful keys, logger names, and log messages for users to filter on.
// It's illegal to pass a log level below zero.
// Log verbosity represents how little a log matters. Level zero, the default,
// matters most. Increasing levels matter less and less. Try to avoid lots of
// different verbosity levels, and instead provide useful keys, logger names,
// and log messages for users to filter on. It's illegal to pass a log level
// below zero.
//
// The log message consists of a constant message attached to the the log line. This
// should generally be a simple description of what's occuring, and should never be a format string.
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string.
//
// Variable information can then be attached using named values (key/value pairs). Keys are arbitrary
// strings, while values may be any Go value.
// Variable information can then be attached using named values (key/value
// pairs). Keys are arbitrary strings, while values may be any Go value.
//
// Key Naming Conventions
//
// While users are generally free to use key names of their choice, it's generally best to avoid
// using the following keys, as they're frequently used by implementations:
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
// * be human-readable and meaningful (not auto-generated or simple ordinals)
// * be constant (not dependent on input data)
// * contain only printable characters
// * not contain whitespace or punctuation
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
//
// - `"error"`: the underlying error value in the `Error` method.
// - `"stacktrace"`: the stack trace associated with a particular log line or error
// (often from the `Error` message).
// - `"caller"`: the calling information (file/line) of a particular log line.
// - `"msg"`: the log message.
// - `"error"`: the underlying error value in the `Error` method.
// - `"level"`: the log level.
// - `"logger"`: the name of the associated logger.
// - `"msg"`: the log message.
// - `"stacktrace"`: the stack trace associated with a particular log line or
// error (often from the `Error` message).
// - `"ts"`: the timestamp for a log line.
//
// Implementations are encouraged to make use of these keys to represent the above
// concepts, when neccessary (for example, in a pure-JSON output form, it would be
// necessary to represent at least message and timestamp as ordinary named values).
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
package logr
// TODO: consider adding back in format strings if they're really needed

22
vendor/github.com/go-logr/zapr/example/main.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
package main

import "github.com/go-logr/zapr"
import "go.uber.org/zap"

// E is a minimal error implementation used below to demonstrate Error logging.
type E struct {
	str string
}

// Error implements the error interface by returning the wrapped string.
func (e E) Error() string {
	return e.str
}

// main demonstrates basic zapr usage: a named logger with default key/value
// pairs, V-level filtering (V(1) is shown, V(3) is suppressed by
// zap.NewExample's configuration), and error logging via Error.
func main() {
	log := zapr.NewLogger(zap.NewExample())
	log = log.WithName("MyName").WithValues("user", "you")
	log.Info("hello", "val1", 1, "val2", map[string]int{"k": 1})
	log.V(1).Info("you should see this")
	log.V(3).Info("you should NOT see this")
	log.Error(nil, "uh oh", "trouble", true, "reasons", []float64{0.1, 0.11, 3.14})
	log.Error(E{"an error occurred"}, "goodbye", "code", -1)
}

View File

@ -276,7 +276,7 @@ func (c *Call) satisfied() bool {
return c.numCalls >= c.minCalls
}
// Returns true iff the maximum number of calls have been made.
// Returns true if the maximum number of calls have been made.
func (c *Call) exhausted() bool {
return c.numCalls >= c.maxCalls
}

75
vendor/github.com/gorilla/mux/.circleci/config.yml generated vendored Normal file
View File

@ -0,0 +1,75 @@
version: 2.0
jobs:
# Base test configuration for Go library tests. Each distinct version should
# inherit this base, and override (at least) the container image used.
"test": &test
docker:
- image: circleci/golang:latest
working_directory: /go/src/github.com/gorilla/mux
steps: &steps
- checkout
- run: go version
- run: go get -t -v ./...
# Only run gofmt, vet & lint against the latest Go version
- run: >
if [[ "$LATEST" = true ]]; then
go get -u golang.org/x/lint/golint
golint ./...
fi
- run: >
if [[ "$LATEST" = true ]]; then
diff -u <(echo -n) <(gofmt -d .)
fi
- run: >
if [[ "$LATEST" = true ]]; then
go vet -v .
fi
- run: go test -v -race ./...
"latest":
<<: *test
environment:
LATEST: true
"1.12":
<<: *test
docker:
- image: circleci/golang:1.12
"1.11":
<<: *test
docker:
- image: circleci/golang:1.11
"1.10":
<<: *test
docker:
- image: circleci/golang:1.10
"1.9":
<<: *test
docker:
- image: circleci/golang:1.9
"1.8":
<<: *test
docker:
- image: circleci/golang:1.8
"1.7":
<<: *test
docker:
- image: circleci/golang:1.7
workflows:
version: 2
build:
jobs:
- "latest"
- "1.12"
- "1.11"
- "1.10"
- "1.9"
- "1.8"
- "1.7"

View File

@ -1,10 +1,10 @@
daysUntilStale: 60
daysUntilClose: 7
daysUntilStale: 75
daysUntilClose: 14
# Issues with these labels will never be considered stale
exemptLabels:
- v2
- needs-review
- work-required
- proposal
- needs review
- build system
staleLabel: stale
markComment: >
This issue has been automatically marked as stale because it hasn't seen

View File

@ -1,24 +0,0 @@
language: go
matrix:
include:
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.10.x
- go: 1.11.x
- go: 1.x
env: LATEST=true
- go: tip
allow_failures:
- go: tip
install:
- # Skip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- if [[ "$LATEST" = true ]]; then go vet .; fi
- go test -v -race ./...

View File

@ -1,11 +0,0 @@
**What version of Go are you running?** (Paste the output of `go version`)
**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
**Describe your problem** (and what you have tried so far)
**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)

View File

@ -2,6 +2,7 @@
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
@ -29,6 +30,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Walking Routes](#walking-routes)
* [Graceful Shutdown](#graceful-shutdown)
* [Middleware](#middleware)
* [Handling CORS Requests](#handling-cors-requests)
* [Testing Handlers](#testing-handlers)
* [Full Example](#full-example)
@ -491,6 +493,73 @@ r.Use(amw.Middleware)
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
### Handling CORS Requests
[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
* If you do not specify any methods, then:
> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
```go
package main
import (
"net/http"
"github.com/gorilla/mux"
)
func main() {
r := mux.NewRouter()
// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
r.Use(mux.CORSMethodMiddleware(r))
http.ListenAndServe(":8080", r)
}
func fooHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
return
}
w.Write([]byte("foo"))
}
```
And a request to `/foo` using something like:
```bash
curl localhost:8080/foo -v
```
Would look like:
```bash
* Trying ::1...
* TCP_NODELAY set
* Connected to localhost (::1) port 8080 (#0)
> GET /foo HTTP/1.1
> Host: localhost:8080
> User-Agent: curl/7.59.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
< Access-Control-Allow-Origin: *
< Date: Fri, 28 Jun 2019 20:13:30 GMT
< Content-Length: 3
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
foo
```
### Testing Handlers
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.

View File

@ -295,7 +295,7 @@ A more complex authentication middleware, which maps session token to users, cou
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{}
amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate()
r.Use(amw.Middleware)

View File

@ -0,0 +1,37 @@
package mux_test

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

// ExampleCORSMethodMiddleware shows CORSMethodMiddleware populating the
// Access-Control-Allow-Methods response header from a route's method
// matchers. The route must explicitly match OPTIONS for the middleware to
// set the header, and the example sends a preflight-shaped OPTIONS request.
func ExampleCORSMethodMiddleware() {
	r := mux.NewRouter()

	// Same path registered twice: once for the "real" methods, once for
	// OPTIONS (which also sets the other CORS headers itself).
	r.HandleFunc("/foo", func(w http.ResponseWriter, r *http.Request) {
		// Handle the request
	}).Methods(http.MethodGet, http.MethodPut, http.MethodPatch)
	r.HandleFunc("/foo", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "http://example.com")
		w.Header().Set("Access-Control-Max-Age", "86400")
	}).Methods(http.MethodOptions)

	r.Use(mux.CORSMethodMiddleware(r))

	rw := httptest.NewRecorder()
	req, _ := http.NewRequest("OPTIONS", "/foo", nil)                 // needs to be OPTIONS
	req.Header.Set("Access-Control-Request-Method", "POST")           // needs to be non-empty
	req.Header.Set("Access-Control-Request-Headers", "Authorization") // needs to be non-empty
	req.Header.Set("Origin", "http://example.com")                    // needs to be non-empty

	r.ServeHTTP(rw, req)

	fmt.Println(rw.Header().Get("Access-Control-Allow-Methods"))
	fmt.Println(rw.Header().Get("Access-Control-Allow-Origin"))
	// Output:
	// GET,PUT,PATCH,OPTIONS
	// http://example.com
}

View File

@ -32,37 +32,19 @@ func (r *Router) useInterface(mw middleware) {
r.middlewares = append(r.middlewares, mw)
}
// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
// on a request, by matching routes based only on paths. It also handles
// OPTIONS requests, by settings Access-Control-Allow-Methods, and then
// returning without calling the next http handler.
// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
// on requests for routes that have an OPTIONS method matcher to all the method matchers on
// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
// by the middleware. See examples for usage.
func CORSMethodMiddleware(r *Router) MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
var allMethods []string
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
for _, m := range route.matchers {
if _, ok := m.(*routeRegexp); ok {
if m.Match(req, &RouteMatch{}) {
methods, err := route.GetMethods()
if err != nil {
return err
}
allMethods = append(allMethods, methods...)
}
break
}
}
return nil
})
allMethods, err := getAllMethodsForRoute(r, req)
if err == nil {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ","))
if req.Method == "OPTIONS" {
return
for _, v := range allMethods {
if v == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
}
}
}
@ -70,3 +52,28 @@ func CORSMethodMiddleware(r *Router) MiddlewareFunc {
})
}
}
// getAllMethodsForRoute returns all the methods from method matchers matching a given
// request. It walks every route in the router; for each route, only the first
// routeRegexp matcher encountered is consulted (the loop breaks after it,
// whether or not it matched), and the route's methods are appended only when
// that matcher matches req. Walk errors and GetMethods errors are returned
// alongside whatever methods were collected before the error.
func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
	var allMethods []string

	err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
		for _, m := range route.matchers {
			if _, ok := m.(*routeRegexp); ok {
				if m.Match(req, &RouteMatch{}) {
					methods, err := route.GetMethods()
					if err != nil {
						return err
					}

					allMethods = append(allMethods, methods...)
				}
				// Only the first routeRegexp (the path matcher) is considered.
				break
			}
		}
		return nil
	})

	return allMethods, err
}

View File

@ -3,7 +3,6 @@ package mux
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
)
@ -28,12 +27,12 @@ func TestMiddlewareAdd(t *testing.T) {
router.useInterface(mw)
if len(router.middlewares) != 1 || router.middlewares[0] != mw {
t.Fatal("Middleware was not added correctly")
t.Fatal("Middleware interface was not added correctly")
}
router.Use(mw.Middleware)
if len(router.middlewares) != 2 {
t.Fatal("MiddlewareFunc method was not added correctly")
t.Fatal("Middleware method was not added correctly")
}
banalMw := func(handler http.Handler) http.Handler {
@ -41,7 +40,7 @@ func TestMiddlewareAdd(t *testing.T) {
}
router.Use(banalMw)
if len(router.middlewares) != 3 {
t.Fatal("MiddlewareFunc method was not added correctly")
t.Fatal("Middleware function was not added correctly")
}
}
@ -55,34 +54,37 @@ func TestMiddleware(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/")
// Test regular middleware call
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
t.Run("regular middleware call", func(t *testing.T) {
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
})
// Middleware should not be called for 404
req = newRequest("GET", "/not/found")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
t.Run("not called for 404", func(t *testing.T) {
req = newRequest("GET", "/not/found")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
})
// Middleware should not be called if there is a method mismatch
req = newRequest("POST", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
// Add the middleware again as function
router.Use(mw.Middleware)
req = newRequest("GET", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 3 {
t.Fatalf("Expected %d calls, but got only %d", 3, mw.timesCalled)
}
t.Run("not called for method mismatch", func(t *testing.T) {
req = newRequest("POST", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
})
t.Run("regular call using function middleware", func(t *testing.T) {
router.Use(mw.Middleware)
req = newRequest("GET", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 3 {
t.Fatalf("Expected %d calls, but got only %d", 3, mw.timesCalled)
}
})
}
func TestMiddlewareSubrouter(t *testing.T) {
@ -98,42 +100,56 @@ func TestMiddlewareSubrouter(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 0 {
t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled)
}
t.Run("not called for route outside subrouter", func(t *testing.T) {
router.ServeHTTP(rw, req)
if mw.timesCalled != 0 {
t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled)
}
})
req = newRequest("GET", "/sub/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 0 {
t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled)
}
t.Run("not called for subrouter root 404", func(t *testing.T) {
req = newRequest("GET", "/sub/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 0 {
t.Fatalf("Expected %d calls, but got only %d", 0, mw.timesCalled)
}
})
req = newRequest("GET", "/sub/x")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
t.Run("called once for route inside subrouter", func(t *testing.T) {
req = newRequest("GET", "/sub/x")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
})
req = newRequest("GET", "/sub/not/found")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
t.Run("not called for 404 inside subrouter", func(t *testing.T) {
req = newRequest("GET", "/sub/not/found")
router.ServeHTTP(rw, req)
if mw.timesCalled != 1 {
t.Fatalf("Expected %d calls, but got only %d", 1, mw.timesCalled)
}
})
router.useInterface(mw)
t.Run("middleware added to router", func(t *testing.T) {
router.useInterface(mw)
req = newRequest("GET", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 2 {
t.Fatalf("Expected %d calls, but got only %d", 2, mw.timesCalled)
}
t.Run("called once for route outside subrouter", func(t *testing.T) {
req = newRequest("GET", "/")
router.ServeHTTP(rw, req)
if mw.timesCalled != 2 {
t.Fatalf("Expected %d calls, but got only %d", 2, mw.timesCalled)
}
})
req = newRequest("GET", "/sub/x")
router.ServeHTTP(rw, req)
if mw.timesCalled != 4 {
t.Fatalf("Expected %d calls, but got only %d", 4, mw.timesCalled)
}
t.Run("called twice for route inside subrouter", func(t *testing.T) {
req = newRequest("GET", "/sub/x")
router.ServeHTTP(rw, req)
if mw.timesCalled != 4 {
t.Fatalf("Expected %d calls, but got only %d", 4, mw.timesCalled)
}
})
})
}
func TestMiddlewareExecution(t *testing.T) {
@ -145,30 +161,33 @@ func TestMiddlewareExecution(t *testing.T) {
w.Write(handlerStr)
})
rw := NewRecorder()
req := newRequest("GET", "/")
t.Run("responds normally without middleware", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/")
// Test handler-only call
router.ServeHTTP(rw, req)
router.ServeHTTP(rw, req)
if !bytes.Equal(rw.Body.Bytes(), handlerStr) {
t.Fatal("Handler response is not what it should be")
}
// Test middleware call
rw = NewRecorder()
router.Use(func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write(mwStr)
h.ServeHTTP(w, r)
})
if !bytes.Equal(rw.Body.Bytes(), handlerStr) {
t.Fatal("Handler response is not what it should be")
}
})
router.ServeHTTP(rw, req)
if !bytes.Equal(rw.Body.Bytes(), append(mwStr, handlerStr...)) {
t.Fatal("Middleware + handler response is not what it should be")
}
t.Run("responds with handler and middleware response", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/")
router.Use(func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write(mwStr)
h.ServeHTTP(w, r)
})
})
router.ServeHTTP(rw, req)
if !bytes.Equal(rw.Body.Bytes(), append(mwStr, handlerStr...)) {
t.Fatal("Middleware + handler response is not what it should be")
}
})
}
func TestMiddlewareNotFound(t *testing.T) {
@ -187,26 +206,29 @@ func TestMiddlewareNotFound(t *testing.T) {
})
// Test not found call with default handler
rw := NewRecorder()
req := newRequest("GET", "/notfound")
t.Run("not called", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/notfound")
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a 404")
}
// Test not found call with custom handler
rw = NewRecorder()
req = newRequest("GET", "/notfound")
router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Custom 404 handler"))
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a 404")
}
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a custom 404")
}
t.Run("not called with custom not found handler", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/notfound")
router.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Custom 404 handler"))
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a custom 404")
}
})
}
func TestMiddlewareMethodMismatch(t *testing.T) {
@ -225,27 +247,29 @@ func TestMiddlewareMethodMismatch(t *testing.T) {
})
})
// Test method mismatch
rw := NewRecorder()
req := newRequest("POST", "/")
t.Run("not called", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("POST", "/")
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
// Test not found call
rw = NewRecorder()
req = newRequest("POST", "/")
router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Method not allowed"))
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
t.Run("not called with custom method not allowed handler", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("POST", "/")
router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Method not allowed"))
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
})
}
func TestMiddlewareNotFoundSubrouter(t *testing.T) {
@ -269,27 +293,29 @@ func TestMiddlewareNotFoundSubrouter(t *testing.T) {
})
})
// Test not found call for default handler
rw := NewRecorder()
req := newRequest("GET", "/sub/notfound")
t.Run("not called", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/sub/notfound")
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a 404")
}
// Test not found call with custom handler
rw = NewRecorder()
req = newRequest("GET", "/sub/notfound")
subrouter.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Custom 404 handler"))
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a 404")
}
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a custom 404")
}
t.Run("not called with custom not found handler", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/sub/notfound")
subrouter.NotFoundHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Custom 404 handler"))
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a custom 404")
}
})
}
func TestMiddlewareMethodMismatchSubrouter(t *testing.T) {
@ -313,66 +339,142 @@ func TestMiddlewareMethodMismatchSubrouter(t *testing.T) {
})
})
// Test method mismatch without custom handler
rw := NewRecorder()
req := newRequest("POST", "/sub/")
t.Run("not called", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("POST", "/sub/")
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
// Test method mismatch with custom handler
rw = NewRecorder()
req = newRequest("POST", "/sub/")
router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Method not allowed"))
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
t.Run("not called with custom method not allowed handler", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("POST", "/sub/")
router.MethodNotAllowedHandler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Write([]byte("Method not allowed"))
})
router.ServeHTTP(rw, req)
if bytes.Contains(rw.Body.Bytes(), mwStr) {
t.Fatal("Middleware was called for a method mismatch")
}
})
}
func TestCORSMethodMiddleware(t *testing.T) {
router := NewRouter()
cases := []struct {
path string
response string
method string
testURL string
expectedAllowedMethods string
testCases := []struct {
name string
registerRoutes func(r *Router)
requestHeader http.Header
requestMethod string
requestPath string
expectedAccessControlAllowMethodsHeader string
expectedResponse string
}{
{"/g/{o}", "a", "POST", "/g/asdf", "POST,PUT,GET,OPTIONS"},
{"/g/{o}", "b", "PUT", "/g/bla", "POST,PUT,GET,OPTIONS"},
{"/g/{o}", "c", "GET", "/g/orilla", "POST,PUT,GET,OPTIONS"},
{"/g", "d", "POST", "/g", "POST,OPTIONS"},
{
name: "does not set without OPTIONS matcher",
registerRoutes: func(r *Router) {
r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch)
},
requestMethod: "GET",
requestPath: "/foo",
expectedAccessControlAllowMethodsHeader: "",
expectedResponse: "a",
},
{
name: "sets on non OPTIONS",
registerRoutes: func(r *Router) {
r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch)
r.HandleFunc("/foo", stringHandler("b")).Methods(http.MethodOptions)
},
requestMethod: "GET",
requestPath: "/foo",
expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS",
expectedResponse: "a",
},
{
name: "sets without preflight headers",
registerRoutes: func(r *Router) {
r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch)
r.HandleFunc("/foo", stringHandler("b")).Methods(http.MethodOptions)
},
requestMethod: "OPTIONS",
requestPath: "/foo",
expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS",
expectedResponse: "b",
},
{
name: "does not set on error",
registerRoutes: func(r *Router) {
r.HandleFunc("/foo", stringHandler("a"))
},
requestMethod: "OPTIONS",
requestPath: "/foo",
expectedAccessControlAllowMethodsHeader: "",
expectedResponse: "a",
},
{
name: "sets header on valid preflight",
registerRoutes: func(r *Router) {
r.HandleFunc("/foo", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch)
r.HandleFunc("/foo", stringHandler("b")).Methods(http.MethodOptions)
},
requestMethod: "OPTIONS",
requestPath: "/foo",
requestHeader: http.Header{
"Access-Control-Request-Method": []string{"GET"},
"Access-Control-Request-Headers": []string{"Authorization"},
"Origin": []string{"http://example.com"},
},
expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS",
expectedResponse: "b",
},
{
name: "does not set methods from unmatching routes",
registerRoutes: func(r *Router) {
r.HandleFunc("/foo", stringHandler("c")).Methods(http.MethodDelete)
r.HandleFunc("/foo/bar", stringHandler("a")).Methods(http.MethodGet, http.MethodPut, http.MethodPatch)
r.HandleFunc("/foo/bar", stringHandler("b")).Methods(http.MethodOptions)
},
requestMethod: "OPTIONS",
requestPath: "/foo/bar",
requestHeader: http.Header{
"Access-Control-Request-Method": []string{"GET"},
"Access-Control-Request-Headers": []string{"Authorization"},
"Origin": []string{"http://example.com"},
},
expectedAccessControlAllowMethodsHeader: "GET,PUT,PATCH,OPTIONS",
expectedResponse: "b",
},
}
for _, tt := range cases {
router.HandleFunc(tt.path, stringHandler(tt.response)).Methods(tt.method)
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
router := NewRouter()
router.Use(CORSMethodMiddleware(router))
tt.registerRoutes(router)
for _, tt := range cases {
rr := httptest.NewRecorder()
req := newRequest(tt.method, tt.testURL)
router.Use(CORSMethodMiddleware(router))
router.ServeHTTP(rr, req)
rw := NewRecorder()
req := newRequest(tt.requestMethod, tt.requestPath)
req.Header = tt.requestHeader
if rr.Body.String() != tt.response {
t.Errorf("Expected body '%s', found '%s'", tt.response, rr.Body.String())
}
router.ServeHTTP(rw, req)
allowedMethods := rr.Header().Get("Access-Control-Allow-Methods")
actualMethodsHeader := rw.Header().Get("Access-Control-Allow-Methods")
if actualMethodsHeader != tt.expectedAccessControlAllowMethodsHeader {
t.Fatalf("Expected Access-Control-Allow-Methods to equal %s but got %s", tt.expectedAccessControlAllowMethodsHeader, actualMethodsHeader)
}
if allowedMethods != tt.expectedAllowedMethods {
t.Errorf("Expected Access-Control-Allow-Methods '%s', found '%s'", tt.expectedAllowedMethods, allowedMethods)
}
actualResponse := rw.Body.String()
if actualResponse != tt.expectedResponse {
t.Fatalf("Expected response to equal %s but got %s", tt.expectedResponse, actualResponse)
}
})
}
}
@ -411,27 +513,33 @@ func TestMiddlewareOnMultiSubrouter(t *testing.T) {
})
})
rw := NewRecorder()
req := newRequest("GET", "/first")
t.Run("/first uses first middleware", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/first")
router.ServeHTTP(rw, req)
if rw.Body.String() != first {
t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", first, rw.Body.String())
}
router.ServeHTTP(rw, req)
if rw.Body.String() != first {
t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", first, rw.Body.String())
}
})
rw = NewRecorder()
req = newRequest("GET", "/second")
t.Run("/second uses second middleware", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/second")
router.ServeHTTP(rw, req)
if rw.Body.String() != second {
t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", second, rw.Body.String())
}
router.ServeHTTP(rw, req)
if rw.Body.String() != second {
t.Fatalf("Middleware did not run: expected %s middleware to write a response (got %s)", second, rw.Body.String())
}
})
rw = NewRecorder()
req = newRequest("GET", "/second/not-exist")
t.Run("uses not found handler", func(t *testing.T) {
rw := NewRecorder()
req := newRequest("GET", "/second/not-exist")
router.ServeHTTP(rw, req)
if rw.Body.String() != notFound {
t.Fatalf("Notfound handler did not run: expected %s for not-exist, (got %s)", notFound, rw.Body.String())
}
router.ServeHTTP(rw, req)
if rw.Body.String() != notFound {
t.Fatalf("Notfound handler did not run: expected %s for not-exist, (got %s)", notFound, rw.Body.String())
}
})
}

View File

@ -1,3 +1,27 @@
## 1.6.0
### Features
- Display special chars on error [41e1b26]
- Add BeElementOf matcher [6a48b48]
### Fixes
- Remove duplication in XML matcher tests [cc1a6cb]
- Remove unnecessary conversions (#357) [7bf756a]
- Fixed import order (#353) [2e3b965]
- Added missing error handling in test (#355) [c98d3eb]
- Simplify code (#356) [0001ed9]
- Simplify code (#354) [0d9100e]
- Fixed typos (#352) [3f647c4]
- Add failure message tests to BeElementOf matcher [efe19c3]
- Update go-testcov untested sections [37ee382]
- Mark all uncovered files so go-testcov ./... works [53b150e]
- Reenable gotip in travis [5c249dc]
- Fix the typo of comment (#345) [f0e010e]
- Optimize contain_element_matcher [abeb93d]
## 1.5.0
### Features

View File

@ -36,7 +36,7 @@ var PrintContextObjects = false
// TruncatedDiff choose if we should display a truncated pretty diff or not
var TruncatedDiff = true
// Ctx interface defined here to keep backwards compatability with go < 1.7
// Ctx interface defined here to keep backwards compatibility with go < 1.7
// It matches the context.Context interface
type Ctx interface {
Deadline() (deadline time.Time, ok bool)
@ -61,7 +61,7 @@ Generates a formatted matcher success/failure message of the form:
<message>
<pretty printed expected>
If expected is omited, then the message looks like:
If expected is omitted, then the message looks like:
Expected
<pretty printed actual>
@ -300,7 +300,7 @@ func formatString(object interface{}, indentation uint) string {
}
}
return fmt.Sprintf("%s", result)
return result
} else {
return fmt.Sprintf("%q", object)
}

View File

@ -21,7 +21,7 @@ Say is a Gomega matcher that operates on gbytes.Buffers:
will succeed if the unread portion of the buffer matches the regular expression "something".
When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match.
When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the successful match.
Thus, subsequent calls to Say will only match against the unread portion of the buffer
Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can:

View File

@ -68,7 +68,7 @@ func doBuild(gopath, packagePath string, env []string, args ...string) (compiled
executable := filepath.Join(tmpDir, path.Base(packagePath))
if runtime.GOOS == "windows" {
executable = executable + ".exe"
executable += ".exe"
}
cmdArgs := append([]string{"build"}, args...)

View File

@ -8,7 +8,7 @@ import (
)
/*
PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line.
PrefixedWriter wraps an io.Writer, emitting the passed in prefix at the beginning of each new line.
This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each
session by passing in a PrefixedWriter:

View File

@ -38,7 +38,7 @@ A more comprehensive example is available at https://onsi.github.io/gomega/#_tes
})
Context("when requesting all sprockets", func() {
Context("when the response is succesful", func() {
Context("when the response is successful", func() {
BeforeEach(func() {
sprockets = []Sprocket{
NewSprocket("Alfalfa"),

View File

@ -1072,6 +1072,7 @@ var _ = Describe("TestServer", func() {
var received protobuf.SimpleMessage
body, err := ioutil.ReadAll(resp.Body)
Expect(err).ShouldNot(HaveOccurred())
err = proto.Unmarshal(body, &received)
Expect(err).ShouldNot(HaveOccurred())
})

View File

@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
const GOMEGA_VERSION = "1.5.0"
const GOMEGA_VERSION = "1.6.0"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@ -155,7 +155,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
// ExpectWithOffset(1, "foo").To(Equal("foo"))
//
// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
// this is used to modify the call-stack offset when computing line numbers.
// that is used to modify the call-stack offset when computing line numbers.
//
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
@ -280,7 +280,7 @@ func SetDefaultEventuallyPollingInterval(t time.Duration) {
defaultEventuallyPollingInterval = t
}
// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satsified for this long.
// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
func SetDefaultConsistentlyDuration(t time.Duration) {
defaultConsistentlyDuration = t
}

View File

@ -41,12 +41,6 @@ var _ = Describe("MatchXMLMatcher", func() {
Expect(sample_09).ShouldNot(MatchXML(sample_10)) // same structures with different attribute values
Expect(sample_11).Should(MatchXML(sample_11)) // with non UTF-8 encoding
})
It("should work with byte arrays", func() {
Expect([]byte(sample_01)).Should(MatchXML([]byte(sample_01)))
Expect([]byte(sample_01)).Should(MatchXML(sample_01))
Expect(sample_01).Should(MatchXML([]byte(sample_01)))
})
})
Context("when the expected is not valid XML", func() {

View File

@ -265,7 +265,7 @@ var _ = Describe("ReceiveMatcher", func() {
Describe("when used with eventually and a custom matcher", func() {
It("should return the matcher's error when a failing value is received on the channel, instead of the must receive something failure", func() {
failures := InterceptGomegaFailures(func() {
c := make(chan string, 0)
c := make(chan string)
Eventually(c, 0.01).Should(Receive(Equal("hello")))
})
Expect(failures[0]).Should(ContainSubstring("When passed a matcher, ReceiveMatcher's channel *must* receive something."))

View File

@ -2,10 +2,11 @@ package matchers_test
import (
"errors"
"regexp"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/matchers"
"regexp"
)
func Erroring() error {

View File

@ -1,6 +1,5 @@
package bipartitegraph
import "errors"
import "fmt"
import . "github.com/onsi/gomega/matchers/support/goraph/node"
@ -28,7 +27,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
for j, rightValue := range rightValues {
neighbours, err := neighbours(leftValue, rightValue)
if err != nil {
return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error()))
return nil, fmt.Errorf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())
}
if neighbours {

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,24 @@
CODEGEN_PKG ?= ./vendor/k8s.io/code-generator
all: test verify-deepcopy
update-deepcopy: ## Update the deepcopy generated code
./tools/update-deepcopy.sh
verify-deepcopy: ## Verify deepcopy generated code
VERIFY=--verify-only ./tools/update-deepcopy.sh
test: ## Run unit tests
go test -count=1 -short ./conditions/...
go test -count=1 -short ./objectreferences/...
help: ## Show this help screen
@echo 'Usage: make <OPTIONS> ... <TARGETS>'
@echo ''
@echo 'Available targets are:'
@echo ''
@grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
@echo ''
.PHONY: update-deepcopy verify-deepcopy

View File

@ -0,0 +1,8 @@
reviewers:
- djzager
- rthallisey
- mhrivnak
approvers:
- djzager
- rthallisey
- mhrivnak

View File

@ -0,0 +1,25 @@
Custom Resource Status
======================
[![Go Report Card](https://goreportcard.com/badge/github.com/openshift/custom-resource-status)](https://goreportcard.com/report/github.com/openshift/custom-resource-status)
[![Go Doc](https://godoc.org/github.com/openshift/custom-resource-status?status.svg)](http://godoc.org/github.com/openshift/custom-resource-status)
[![GitHub Issues](https://img.shields.io/github/issues/openshift/custom-resource-status.svg)](https://github.com/openshift/custom-resource-status/issues)
[![Licensed under Apache License version 2.0](https://img.shields.io/github/license/openshift/custom-resource-status.svg?maxAge=2592000)](https://www.apache.org/licenses/LICENSE-2.0)
The purpose of this project is to provide some level of standardization and
best-practices with respect to managing the status of custom resources. This project
steals, err draws from:
* [Cluster Version Operator (CVO)](https://github.com/openshift/cluster-version-operator)
that manages essential OpenShift operators.
* [ClusterOperator Custom Resource](https://github.com/openshift/cluster-version-operator/blob/master/docs/dev/clusteroperator.md#what-should-an-operator-report-with-clusteroperator-custom-resource)
that exists for operators managed by CVO to communicate their status.
* [openshift/library-go ClusterOperator status helpers](https://github.com/openshift/library-go/blob/master/pkg/config/clusteroperator/v1helpers/status.go)
that makes it easy to manage the status on a ClusterOperator resource.
The goal here is to prescribe, without mandate, how to meaningfully populate the
status of the Custom Resources your operator manages. Types, constants, and
functions are provided for the following:
* [Conditions](conditions/README.md)
* [Object References](objectreferences/README.md)

View File

@ -0,0 +1,44 @@
Conditions
==========
Provides:
* `Condition` type as specified in the [Kubernetes API Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md)
* `ConditionType` and generally useful constants for this type (i.e. "Available",
"Progressing", "Degraded", and "Upgradeable")
* Functions for setting, removing, finding, and evaluating conditions.
To use, simply add `Conditions` to your Custom Resource Status struct like:
```
// ExampleAppStatus defines the observed state of ExampleApp
type ExampleAppStatus struct {
...
// conditions describes the state of the operator's reconciliation functionality.
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
// Conditions is a list of conditions related to operator reconciliation
Conditions []conditions.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
}
```
Then, as appropriate in your Reconcile function, use
`conditions.SetStatusCondition` like:
```
instance := &examplev1alpha1.ExampleApp{}
err := r.client.Get(context.TODO(), request.NamespacedName, instance)
...handle err
conditions.SetStatusCondition(&instance.Status.Conditions, conditions.Condition{
Type: conditions.ConditionAvailable,
Status: corev1.ConditionFalse,
Reason: "ReconcileStarted",
Message: "Reconciling resource",
})
// Update the status
err = r.client.Status().Update(context.TODO(), instance)
...handle err
```

View File

@ -0,0 +1,9 @@
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// Package v1 provides version v1 of the types and functions necessary to
// manage and inspect a slice of conditions. It is opinionated in the
// condition types provided but leaves it to the user to define additional
// types as necessary.
package v1

View File

@ -0,0 +1,82 @@
package v1
import (
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SetStatusCondition sets the corresponding condition in conditions to newCondition.
//
// If no condition with newCondition.Type exists, newCondition is appended with
// both LastTransitionTime and LastHeartbeatTime set to now. Otherwise the
// existing condition is updated in place: LastTransitionTime only advances when
// the Status value actually changes, while LastHeartbeatTime is refreshed on
// every call.
func SetStatusCondition(conditions *[]Condition, newCondition Condition) {
	if conditions == nil {
		// NOTE(review): reassigning the local pointer means a caller that
		// passed nil can never observe the appended condition; this guard
		// only prevents a nil-pointer dereference below.
		conditions = &[]Condition{}
	}
	// Capture one timestamp so LastTransitionTime and LastHeartbeatTime agree
	// exactly when both are set in the same call, instead of differing by the
	// microseconds between separate time.Now() calls.
	now := metav1.NewTime(time.Now())

	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
	if existingCondition == nil {
		newCondition.LastTransitionTime = now
		newCondition.LastHeartbeatTime = now
		*conditions = append(*conditions, newCondition)
		return
	}

	if existingCondition.Status != newCondition.Status {
		existingCondition.Status = newCondition.Status
		existingCondition.LastTransitionTime = now
	}
	existingCondition.Reason = newCondition.Reason
	existingCondition.Message = newCondition.Message
	existingCondition.LastHeartbeatTime = now
}
// RemoveStatusCondition removes the corresponding conditionType from conditions.
// A nil conditions pointer is a no-op.
func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) {
	if conditions == nil {
		return
	}
	kept := make([]Condition, 0, len(*conditions))
	for _, c := range *conditions {
		if c.Type == conditionType {
			continue
		}
		kept = append(kept, c)
	}
	*conditions = kept
}
// FindStatusCondition finds the conditionType in conditions.
// The returned pointer aliases the slice element, so callers may mutate the
// condition in place; nil is returned when no such condition exists.
func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition {
	for i := 0; i < len(conditions); i++ {
		if conditions[i].Type == conditionType {
			return &conditions[i]
		}
	}
	return nil
}
// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue`
func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool {
	c := FindStatusCondition(conditions, conditionType)
	return c != nil && c.Status == corev1.ConditionTrue
}
// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse`
func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool {
	c := FindStatusCondition(conditions, conditionType)
	return c != nil && c.Status == corev1.ConditionFalse
}
// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown`
func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool {
	c := FindStatusCondition(conditions, conditionType)
	return c != nil && c.Status == corev1.ConditionUnknown
}
// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
// An absent condition is never equal to anything.
func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool {
	if existing := FindStatusCondition(conditions, conditionType); existing != nil {
		return existing.Status == status
	}
	return false
}

View File

@ -0,0 +1,216 @@
package v1
import (
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestSetStatusCondition exercises SetStatusCondition with a table of
// scenarios: appending to an empty slice, appending next to a condition of a
// different type, replacing a condition whose status changed, and refreshing
// a condition whose status is unchanged (heartbeat/reason/message only).
// Timestamp fields are validated separately by compareConditions.
func TestSetStatusCondition(t *testing.T) {
	testCases := []struct {
		name               string
		testCondition      Condition    // condition passed to SetStatusCondition
		startConditions    *[]Condition // mutated in place by the call
		expectedConditions *[]Condition
	}{
		{
			name: "add when empty",
			testCondition: Condition{
				Type:    ConditionAvailable,
				Status:  "True",
				Reason:  "Testing",
				Message: "Basic message",
			},
			startConditions: &[]Condition{},
			expectedConditions: &[]Condition{
				{
					Type:    ConditionAvailable,
					Status:  "True",
					Reason:  "Testing",
					Message: "Basic message",
				},
			},
		},
		{
			name: "add to conditions",
			testCondition: Condition{
				Type:    ConditionAvailable,
				Status:  "True",
				Reason:  "TestingAvailableTrue",
				Message: "Available condition true",
			},
			startConditions: &[]Condition{
				{
					Type:              ConditionDegraded,
					Status:            "False",
					Reason:            "TestingDegradedFalse",
					Message:           "Degraded condition false",
					LastHeartbeatTime: metav1.NewTime(time.Now()),
				},
			},
			expectedConditions: &[]Condition{
				{
					Type:    ConditionAvailable,
					Status:  "True",
					Reason:  "TestingAvailableTrue",
					Message: "Available condition true",
				},
				{
					Type:    ConditionDegraded,
					Status:  "False",
					Reason:  "TestingDegradedFalse",
					Message: "Degraded condition false",
				},
			},
		},
		{
			name: "replace condition",
			testCondition: Condition{
				Type:    ConditionDegraded,
				Status:  "True",
				Reason:  "TestingDegradedTrue",
				Message: "Degraded condition true",
			},
			startConditions: &[]Condition{
				{
					Type:    ConditionDegraded,
					Status:  "False",
					Reason:  "TestingDegradedFalse",
					Message: "Degraded condition false",
				},
			},
			expectedConditions: &[]Condition{
				{
					Type:    ConditionDegraded,
					Status:  "True",
					Reason:  "TestingDegradedTrue",
					Message: "Degraded condition true",
				},
			},
		},
		{
			// Same Status as the existing condition: only reason, message,
			// and the heartbeat timestamp should be refreshed.
			name: "last heartbeat",
			testCondition: Condition{
				Type:    ConditionDegraded,
				Status:  "True",
				Reason:  "TestingDegradedTrue",
				Message: "Degraded condition true",
			},
			startConditions: &[]Condition{
				{
					Type:    ConditionDegraded,
					Status:  "True",
					Reason:  "TestingDegradedFalse",
					Message: "Degraded condition false",
				},
			},
			expectedConditions: &[]Condition{
				{
					Type:    ConditionDegraded,
					Status:  "True",
					Reason:  "TestingDegradedTrue",
					Message: "Degraded condition true",
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			SetStatusCondition(tc.startConditions, tc.testCondition)
			compareConditions(t, tc.startConditions, tc.expectedConditions)
		})
	}
	return
}
// TestRemoveStatusCondition exercises RemoveStatusCondition: removing from an
// empty slice, removing one of several conditions, and removing the only
// remaining condition.
// NOTE(review): compareConditions only checks that expected conditions are
// present, so the "remove when empty" and "remove last condition" cases do
// not actually assert emptiness of the result.
func TestRemoveStatusCondition(t *testing.T) {
	testCases := []struct {
		name               string
		testConditionType  ConditionType // condition type to remove
		startConditions    *[]Condition  // mutated in place by the call
		expectedConditions *[]Condition
	}{
		{
			name:               "remove when empty",
			testConditionType:  ConditionAvailable,
			startConditions:    &[]Condition{},
			expectedConditions: &[]Condition{},
		},
		{
			name:              "basic remove",
			testConditionType: ConditionAvailable,
			startConditions: &[]Condition{
				{
					Type:              ConditionAvailable,
					Status:            "True",
					Reason:            "TestingAvailableTrue",
					Message:           "Available condition true",
					LastHeartbeatTime: metav1.NewTime(time.Now()),
				},
				{
					Type:              ConditionDegraded,
					Status:            "False",
					Reason:            "TestingDegradedFalse",
					Message:           "Degraded condition false",
					LastHeartbeatTime: metav1.NewTime(time.Now()),
				},
			},
			expectedConditions: &[]Condition{
				{
					Type:    ConditionDegraded,
					Status:  "False",
					Reason:  "TestingDegradedFalse",
					Message: "Degraded condition false",
				},
			},
		},
		{
			name:              "remove last condition",
			testConditionType: ConditionAvailable,
			startConditions: &[]Condition{
				{
					Type:    ConditionAvailable,
					Status:  "True",
					Reason:  "TestingAvailableTrue",
					Message: "Available condition true",
				},
			},
			expectedConditions: &[]Condition{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			RemoveStatusCondition(tc.startConditions, tc.testConditionType)
			compareConditions(t, tc.startConditions, tc.expectedConditions)
		})
	}
	return
}
// compareConditions asserts that every condition in expectedConditions is
// present in gotConditions with matching Status and Message, and that its
// LastHeartbeatTime is set and not in the future. Conditions present in
// gotConditions but absent from expectedConditions are not reported.
func compareConditions(t *testing.T, gotConditions *[]Condition, expectedConditions *[]Condition) {
	for _, expectedCondition := range *expectedConditions {
		testCondition := FindStatusCondition(*gotConditions, expectedCondition.Type)
		if testCondition == nil {
			t.Errorf("Condition type '%v' not found in '%v'", expectedCondition.Type, *gotConditions)
			// Without this continue the nil pointer was dereferenced below,
			// turning a test failure into a panic.
			continue
		}
		if testCondition.Status != expectedCondition.Status {
			t.Errorf("Unexpected status '%v', expected '%v'", testCondition.Status, expectedCondition.Status)
		}
		if testCondition.Message != expectedCondition.Message {
			t.Errorf("Unexpected message '%v', expected '%v'", testCondition.Message, expectedCondition.Message)
		}
		// Test for lastHeartbeatTime
		if testCondition.LastHeartbeatTime.IsZero() {
			t.Error("lastHeartbeatTime should never be zero")
		}
		timeNow := metav1.NewTime(time.Now())
		if timeNow.Before(&testCondition.LastHeartbeatTime) {
			t.Errorf("Unexpected lastHeartbeatTime '%v', should be before '%v'", testCondition.LastHeartbeatTime, timeNow)
		}
	}
}

View File

@ -0,0 +1,51 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Condition represents the state of the operator's
// reconciliation functionality.
// +k8s:deepcopy-gen=true
type Condition struct {
	// Type is the kind of condition being reported; see the ConditionType
	// constants declared below.
	Type ConditionType `json:"type" description:"type of condition ie. Available|Progressing|Degraded."`
	// Status is the state of the condition: True, False, or Unknown.
	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
	// Reason is a one-word CamelCase explanation of the last transition.
	// +optional
	Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
	// Message is a human-readable description of the last transition.
	// +optional
	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
	// LastHeartbeatTime is the last time an update was received for this
	// condition, regardless of whether the status changed.
	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"`
	// LastTransitionTime is the last time the condition changed from one
	// status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"`
}

// ConditionType is the state of the operator's reconciliation functionality.
type ConditionType string

const (
	// ConditionAvailable indicates that the resources maintained by the operator
	// are functional and available in the cluster.
	ConditionAvailable ConditionType = "Available"

	// ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the
	// operator.
	ConditionProgressing ConditionType = "Progressing"

	// ConditionDegraded indicates that the resources maintained by the operator are not functioning completely.
	// An example of a degraded state would be if not all pods in a deployment were running.
	// It may still be available, but it is degraded.
	ConditionDegraded ConditionType = "Degraded"

	// ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade.
	// When `False`, the resources maintained by the operator should not be upgraded and the
	// message field should contain a human readable description of what the administrator should do to
	// allow the operator to successfully update the resources maintained by the operator.
	ConditionUpgradeable ConditionType = "Upgradeable"
)

View File

@ -0,0 +1,23 @@
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	*out = *in
	// The timestamp fields are copied via their own DeepCopyInto after the
	// shallow struct copy above.
	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
// A nil receiver yields a nil result.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,10 @@
module github.com/openshift/custom-resource-status
go 1.12
require (
github.com/onsi/gomega v1.5.0
k8s.io/api v0.0.0-20190725062911-6607c48751ae
k8s.io/apimachinery v0.0.0-20190719140911-bfcf53abc9f8
k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb
)

View File

@ -0,0 +1,143 @@
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/api v0.0.0-20190725062911-6607c48751ae h1:La/F8nlqpe1mOXWX22I+Ce8wfQOfXcymYZofbDgmjyo=
k8s.io/api v0.0.0-20190725062911-6607c48751ae/go.mod h1:1O0xzX/RAtnm7l+5VEUxZ1ysO2ghatfq/OZED4zM9kA=
k8s.io/apimachinery v0.0.0-20190719140911-bfcf53abc9f8 h1:fVMoqaOPZ6KTeszBSBO8buFmXaR2JlnMn53eEBeganU=
k8s.io/apimachinery v0.0.0-20190719140911-bfcf53abc9f8/go.mod h1:sBJWIJZfxLhp7mRsRyuAE/NfKTr3kXGR1iaqg8O0gJo=
k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb h1:hfslhgotToortpUcX3HrgGkwp/XmuhTv9SryRLzHkmY=
k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb/go.mod h1:cDx5jQmWH25Ff74daM7NVYty9JWw9dvIS9zT9eIubCY=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@ -0,0 +1,49 @@
Object References
=================
The `ObjectReference` type is provided by Kubernetes Core API
`"k8s.io/api/core/v1"` but the functions to set and find an `ObjectReference`
are provided in this package. This is useful if you would like
to include in the Status of your Custom Resource a list of objects
that are managed by your operator (e.g. Deployments, Services, other
Custom Resources, etc.).
For example, we can add `RelatedObjects` to our Status struct:
```
// ExampleAppStatus defines the observed state of ExampleApp
type ExampleAppStatus struct {
...
// RelatedObjects is a list of objects that are "interesting" or related to this operator.
RelatedObjects []corev1.ObjectReference `json:"relatedObjects,omitempty"`
}
```
Then, through Reconcile, when an object we manage has been found we can add it to
the `RelatedObjects` slice.
```
found := &someAPI.SomeObject{}
err := r.client.Get(context.TODO(), types.NamespacedName{Name: object.Name, Namespace: object.Namespace}, found)
...handle err
// Add it to the list of RelatedObjects if found
// import "k8s.io/client-go/tools/reference"
objectRef, err := reference.GetReference(r.scheme, found)
if err != nil {
return err
}
objectreferencesv1.SetObjectReference(&instance.Status.RelatedObjects, *objectRef)
// Update the status
err = r.client.Status().Update(context.TODO(), instance)
...handle err
```
**NOTE**: This package specifies a minimum for what constitutes a valid object
reference. The minimum valid object reference consists of non-empty strings
for the object's:
* APIVersion
* Kind
* Name

View File

@ -0,0 +1,9 @@
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true
// Package v1 provides version v1 of the functions necessary to
// manage and inspect a slice of object references. This can be
// used to add a RelatedObjects field on the status of your custom
// resource, adding objects that your operator manages to the status.
package v1

View File

@ -0,0 +1,108 @@
package v1
import (
"errors"
corev1 "k8s.io/api/core/v1"
)
var errMinObjectRef = errors.New("object reference must have, at a minimum: apiVersion, kind, and name")
// SetObjectReference - updates list of object references based on newObject.
// Returns errMinObjectRef when newObject lacks apiVersion, kind, or name.
// When a reference matching newObject (per ObjectReferenceEqual) already
// exists it is overwritten in place; otherwise newObject is appended.
// A nil objects pointer is a no-op.
func SetObjectReference(objects *[]corev1.ObjectReference, newObject corev1.ObjectReference) error {
	if !minObjectReference(newObject) {
		return errMinObjectRef
	}
	if objects == nil {
		// Previously this assigned a fresh slice to the local pointer
		// variable, which silently discarded the appended reference —
		// the caller could never observe the update. Treat nil as a no-op.
		return nil
	}
	existingObject, err := FindObjectReference(*objects, newObject)
	if err != nil {
		return err
	}
	if existingObject == nil { // add it to the slice
		*objects = append(*objects, newObject)
	} else { // update found reference
		*existingObject = newObject
	}
	return nil
}
// RemoveObjectReference - updates list of object references to remove rmObject.
// Returns errMinObjectRef when rmObject lacks apiVersion, kind, or name.
// A nil objects pointer is a no-op.
func RemoveObjectReference(objects *[]corev1.ObjectReference, rmObject corev1.ObjectReference) error {
	if !minObjectReference(rmObject) {
		return errMinObjectRef
	}
	if objects == nil {
		return nil
	}
	// TODO: this is incredibly inefficient. If the performance hit becomes a
	// problem this should be improved.
	kept := []corev1.ObjectReference{}
	for _, ref := range *objects {
		if ObjectReferenceEqual(ref, rmObject) {
			continue
		}
		kept = append(kept, ref)
	}
	*objects = kept
	return nil
}
// FindObjectReference - finds the first ObjectReference in a slice of objects
// matching find. The returned pointer aliases the slice element; (nil, nil)
// means no match was found.
func FindObjectReference(objects []corev1.ObjectReference, find corev1.ObjectReference) (*corev1.ObjectReference, error) {
	if !minObjectReference(find) {
		return nil, errMinObjectRef
	}
	for i := 0; i < len(objects); i++ {
		if ObjectReferenceEqual(find, objects[i]) {
			return &objects[i], nil
		}
	}
	return nil, nil
}
// ObjectReferenceEqual - compares gotRef to expectedRef
// preference order: APIVersion, Kind, Name, and Namespace
// if either gotRef or expectedRef fail minObjectReference test, this function
// will simply return false
func ObjectReferenceEqual(gotRef, expectedRef corev1.ObjectReference) bool {
	if !minObjectReference(gotRef) || !minObjectReference(expectedRef) {
		return false
	}
	sameIdentity := gotRef.APIVersion == expectedRef.APIVersion &&
		gotRef.Kind == expectedRef.Kind &&
		gotRef.Name == expectedRef.Name
	// Namespace only participates in the comparison when expectedRef
	// actually specifies one.
	sameNamespace := expectedRef.Namespace == "" || gotRef.Namespace == expectedRef.Namespace
	return sameIdentity && sameNamespace
}
// in order to have any meaningful semantics on this we need to
// ensure that some minimal amount of information is provided in
// the object reference: apiVersion, kind, and name must all be non-empty
func minObjectReference(objRef corev1.ObjectReference) bool {
	return objRef.APIVersion != "" &&
		objRef.Kind != "" &&
		objRef.Name != ""
}

View File

@ -0,0 +1,364 @@
package v1
import (
"testing"
"k8s.io/apimachinery/pkg/api/equality"
corev1 "k8s.io/api/core/v1"
)
// TestSetObjectReference exercises SetObjectReference: appending to an empty
// slice, appending next to an unrelated reference, overwriting a matching
// reference in place, and rejecting a reference that fails the minimum
// apiVersion/kind/name requirement.
func TestSetObjectReference(t *testing.T) {
	testCases := []struct {
		name         string
		testRef      corev1.ObjectReference    // reference passed to SetObjectReference
		startRefs    *[]corev1.ObjectReference // mutated in place by the call
		expectedRefs *[]corev1.ObjectReference
		shouldError  bool // whether SetObjectReference should return an error
	}{
		{
			name: "add when empty",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{},
			expectedRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
			},
			shouldError: false,
		},
		{
			name: "simple add",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			expectedRefs: &[]corev1.ObjectReference{
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
			},
			shouldError: false,
		},
		{
			// A reference equal under ObjectReferenceEqual (which ignores
			// UID) replaces the existing entry rather than appending.
			name: "replace reference",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
				UID:        "fooid",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			expectedRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
					UID:        "fooid",
				},
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			shouldError: false,
		},
		{
			name: "error on newObject not minObjectReference",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				APIVersion: "test.example.io",
			},
			startRefs:    &[]corev1.ObjectReference{},
			expectedRefs: &[]corev1.ObjectReference{},
			shouldError:  true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := SetObjectReference(tc.startRefs, tc.testRef)
			if err != nil && !tc.shouldError {
				t.Fatalf("Error occurred unexpectedly: %v", err)
			}
			if err != nil && tc.shouldError {
				return
			}
			if !equality.Semantic.DeepEqual(*tc.startRefs, *tc.expectedRefs) {
				t.Errorf("Unexpected object refs '%v', expected '%v'", tc.startRefs, tc.expectedRefs)
			}
		})
	}
	return
}
// TestRemoveObjectReference is a table-driven test for RemoveObjectReference,
// covering: removal from an empty slice, removal of one element, removal of the
// last remaining element, removal of every matching element, and the error path
// when the reference to remove is not a minimal object reference.
func TestRemoveObjectReference(t *testing.T) {
	testCases := []struct {
		name         string
		testRef      corev1.ObjectReference
		startRefs    *[]corev1.ObjectReference
		expectedRefs *[]corev1.ObjectReference
		shouldError  bool
	}{
		{
			name: "remove when empty",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs:    &[]corev1.ObjectReference{},
			expectedRefs: &[]corev1.ObjectReference{},
			shouldError:  false,
		},
		{
			name: "simple remove",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			expectedRefs: &[]corev1.ObjectReference{
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			shouldError: false,
		},
		{
			name: "remove last",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
			},
			expectedRefs: &[]corev1.ObjectReference{},
			shouldError:  false,
		},
		{
			// Not sure if this is possible by using SetObjectReference
			// but testing this anyway
			name: "remove matching",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
					UID:        "myuid",
				},
			},
			expectedRefs: &[]corev1.ObjectReference{
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			shouldError: false,
		},
		{
			name: "error on rmObject not minObjectReference",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
				{
					Kind:       "BarKind",
					Namespace:  "test-namespace",
					Name:       "bar",
					APIVersion: "test.example.io",
				},
			},
			expectedRefs: &[]corev1.ObjectReference{},
			shouldError:  true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := RemoveObjectReference(tc.startRefs, tc.testRef)
			if tc.shouldError {
				// Fail explicitly if the expected error never happened;
				// previously a nil error here fell through silently.
				if err == nil {
					t.Fatal("Expected an error but got none")
				}
				return
			}
			if err != nil {
				t.Fatalf("Error occurred unexpectedly: %v", err)
			}
			if !equality.Semantic.DeepEqual(*tc.startRefs, *tc.expectedRefs) {
				t.Errorf("Unexpected object refs '%v', expected '%v'", tc.startRefs, tc.expectedRefs)
			}
		})
	}
}
// TestFindObjectReference is a table-driven test for FindObjectReference,
// covering: a successful lookup, a lookup in an empty slice (nil result),
// and the error path when the reference searched for is not a minimal
// object reference.
func TestFindObjectReference(t *testing.T) {
	testCases := []struct {
		name        string
		testRef     corev1.ObjectReference
		startRefs   *[]corev1.ObjectReference
		expectedRef *corev1.ObjectReference
		shouldError bool
	}{
		{
			name: "simple find",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs: &[]corev1.ObjectReference{
				{
					Kind:       "FooKind",
					Namespace:  "test-namespace",
					Name:       "foo",
					APIVersion: "test.example.io",
				},
			},
			expectedRef: &corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			shouldError: false,
		},
		{
			name: "find when empty",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				Namespace:  "test-namespace",
				Name:       "foo",
				APIVersion: "test.example.io",
			},
			startRefs:   &[]corev1.ObjectReference{},
			expectedRef: nil,
			shouldError: false,
		},
		{
			name: "err when not minimal object reference",
			testRef: corev1.ObjectReference{
				Kind:       "FooKind",
				APIVersion: "test.example.io",
			},
			startRefs:   &[]corev1.ObjectReference{},
			expectedRef: nil,
			shouldError: true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			foundRef, err := FindObjectReference(*tc.startRefs, tc.testRef)
			if tc.shouldError {
				// Fail explicitly if the expected error never happened;
				// previously a nil error here fell through silently.
				if err == nil {
					t.Fatal("Expected an error but got none")
				}
				return
			}
			if err != nil {
				t.Fatalf("Error occurred unexpectedly: %v", err)
			}
			if !equality.Semantic.DeepEqual(foundRef, tc.expectedRef) {
				t.Errorf("Unexpected object ref '%v', expected '%v'", foundRef, tc.expectedRef)
			}
		})
	}
}

View File

@ -0,0 +1,50 @@
package testlib
import (
"fmt"
gomegatypes "github.com/onsi/gomega/types"
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
)
// RepresentCondition - returns a GomegaMatcher useful for comparing conditions
func RepresentCondition(expected conditionsv1.Condition) gomegatypes.GomegaMatcher {
	matcher := &representConditionMatcher{expected: expected}
	return matcher
}
// representConditionMatcher is the GomegaMatcher implementation returned by
// RepresentCondition; it holds the condition an actual value is compared against.
type representConditionMatcher struct {
	// expected is the condition whose Type/Status/Reason/Message must match.
	expected conditionsv1.Condition
}
// Match - compares two conditions
// two conditions are the same if they have the same type, status, reason, and message
func (matcher *representConditionMatcher) Match(actual interface{}) (success bool, err error) {
	actualCondition, ok := actual.(conditionsv1.Condition)
	if !ok {
		return false, fmt.Errorf("RepresentConditionMatcher expects a Condition")
	}
	// Timestamps (heartbeat/transition times) are intentionally ignored.
	expected := matcher.expected
	same := expected.Type == actualCondition.Type &&
		expected.Status == actualCondition.Status &&
		expected.Reason == actualCondition.Reason &&
		expected.Message == actualCondition.Message
	return same, nil
}
// FailureMessage - message shown when Match returned false but a match was expected.
func (matcher *representConditionMatcher) FailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n\t%#v\nto match the condition\n\t%#v", actual, matcher.expected)
}
// NegatedFailureMessage - message shown when Match returned true but a mismatch was expected.
func (matcher *representConditionMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n\t%#v\nnot to match the condition\n\t%#v", actual, matcher.expected)
}

View File

View File

@ -0,0 +1,4 @@
// Package tools imports things required by build scripts
// (it is never imported by regular code; its only purpose is to keep
// build-time tool dependencies recorded in go.mod)
package tools

import _ "k8s.io/code-generator" // simply to force `go mod` to see them as dependencies

View File

@ -0,0 +1,14 @@
#!/bin/bash

# Regenerate deepcopy functions for the conditions and objectreferences API
# groups via k8s.io/code-generator's generate-groups.sh.
# Set VERIFY (e.g. VERIFY=--verify-only) to check generated code instead of
# rewriting it.

# Fail fast on errors, unset variables, and failures inside pipelines.
set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# Prefer the vendored code-generator; fall back to a sibling GOPATH checkout.
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../../../k8s.io/code-generator)}

verify="${VERIFY:-}"

# ${verify} is deliberately unquoted: when empty it must expand to no
# argument at all rather than an empty string.
bash "${CODEGEN_PKG}/generate-groups.sh" "deepcopy" \
  github.com/openshift/custom-resource-status/generated \
  github.com/openshift/custom-resource-status \
  "conditions:v1" \
  "objectreferences:v1" \
  --go-header-file "${SCRIPT_ROOT}/tools/empty.txt" \
  ${verify}

View File

@ -0,0 +1,2 @@
.git/objects
./bin/*

View File

@ -0,0 +1,452 @@
# Created by .ignore support plugin (hsz.mobi)
### Emacs template
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
.idea/
# User-specific stuff:
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
### macOS template
# General
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Go template
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
### Vim template
# Swap
[._]*.s[a-v][a-z]
[._]*.sw[a-p]
[._]s[a-v][a-z]
[._]sw[a-p]
# Session
Session.vim
# Temporary
.netrwhist
*~
# Auto-generated tag files
tags
.idea/alm.iml
.idea/modules.xml
.idea/workspace.xml
### VisualStudioCode ###
.vscode/*
.history
### VisualStudio ###
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
bld/
[Bb]in/
[Oo]bj/
# [Ll]og/ TODO: Remove this rule
# Visual Studio 2015 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUNIT
*.VisualState.xml
TestResult.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
**/Properties/launchSettings.json
*_i.c
*_p.c
*_i.h
*.ilk
*.meta
*.obj
*.pch
*.pdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# JustCode is a .NET coding add-in
.JustCode
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# TODO: Uncomment the next line to ignore your web deploy settings.
# By default, sensitive information, such as encrypted password
# should be stored in the .pubxml.user file.
#*.pubxml
*.pubxml.user
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Typescript v1 declaration files
typings/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# JetBrains Rider
.idea/
*.sln.iml
# CodeRush
.cr/
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# project-specific
*.bak
**/resources/*
/build
/apiserver.local.config
e2e.namespace
minikube.kubeconfig

View File

@ -0,0 +1,120 @@
// Jsonnet source for the project's GitLab CI pipeline. The checked-in
// .gitlab-ci.yml states it is generated from this file, so edit here and
// regenerate rather than editing the YAML by hand.
local utils = import '.gitlab-ci/utils.libsonnet';
local vars = import '.gitlab-ci/vars.libsonnet';
local baseJob = import '.gitlab-ci/base_jobs.libsonnet';
local k8s = utils.k8s;
local mergeJob = utils.ci.mergeJob;
local images = vars.images;
local docker = utils.docker;

local stages_list = [
  // gitlab-ci stages
  'docker_base',
  'docker_build',
  'deploy_preview',
  'test_setup',
  'tests',
  'test_teardown',
  'integration',
  'docker_release',
  'deploy_staging',
  'teardown',
];

// Map form of the stage list ({name: name}) for keyed access below.
local stages = utils.set(stages_list);

// List CI jobs
local jobs = {
  // Builds the base image used as the FROM of later builds; only on
  // schedules and tags.
  'container-base-build': baseJob.dockerBuild {
    stage: stages.docker_base,
    script: docker.build_and_push(images.base.name,
                                  cache=false,
                                  args={ sshkey: vars.deploy_keys.operator_client },
                                  extra_opts=["-f base.Dockerfile"]),
    only: ["schedules", "tags"],
  },

  'container-build': baseJob.dockerBuild {
    // Build and push the olm container.
    // Docker Tag is the branch/tag name
    stage: stages.docker_build,
    before_script+: [
      "mkdir -p $PWD/bin",
    ],

    // builds a single multistage dockerfile and tags images based on labels
    // on the intermediate builds
    script: docker.multibuild_and_push("upstream.Dockerfile", labelImageMap={
      'builder': images.ci.olm.name,
      'olm': images.prerelease.olm.name,
      'e2e': images.e2e.name,
    }),
    only: ['master', 'tags'],
  },

  'container-release': baseJob.dockerBuild {
    // ! Only master
    // push the container to the 'prod' repository
    stage: stages.docker_release,
    before_script+: ["mkdir -p $PWD/bin"],
    script:
      docker.rename(images.prerelease.olm.name, images.release.olm.name) +
      docker.rename(images.e2e.name, images.e2elatest.name),
    only: ['master'],
  },

  'tag-release': baseJob.dockerBuild {
    // ! Only tags
    // push the container to the 'prod' repository
    stage: stages.docker_release,
    before_script+: ["mkdir -p $PWD/bin"],
    script:
      docker.rename(images.prerelease.olm.name, images.tag.olm.name) +
      docker.rename(images.e2e.name, images.e2elatest.name),
    only: ['tags'],
  },

  // Deploys the released image to the staging cluster; master only.
  "deploy-staging": baseJob.Deploy {
    local _vars = self.localvars,
    localvars+:: {
      image: images.release,
      domain: "alm-staging.k8s.devtable.com",
      namespace: "ci-alm-staging",
      channel: "staging",
      helm_opts: ["--force"],
      kubeconfig: "$CD_KUBECONFIG",
    },
    stage: stages.deploy_staging,
    script+: [],
    environment+: {
      name: "staging",
    },
    only: ['master'],
  },

  // Deploys to the shared OpenShift preview cluster and pings Slack; master only.
  "deploy-openshift": baseJob.Deploy {
    local _vars = self.localvars,
    localvars+:: {
      image: images.release,
      domain: "console.apps.ui-preserve.origin-gce.dev.openshift.com",
      namespace: "operator-lifecycle-manager",
      channel: "staging",
      helm_opts: ["--force"],
      kubeconfig: "$OPENSHIFT_KUBECONFIG",
      params+:: {
        watchedNamespaces: "",
      },
    },
    stage: stages.deploy_staging,
    script+: [
      "curl -X POST --data-urlencode \"payload={\\\"text\\\": \\\"New OLM Operator quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA} deployed to ${OPENSHIFT_HOST}/k8s/ns/operator-lifecycle-manager/deployments/alm-operator\\\"}\" ${TEAMUI_SLACK_URL}",
    ],
    environment+: {
      name: "openshift",
    },
    only: ['master'],
  },
};

// Final pipeline document: stage ordering, global variables, and all jobs.
{
  stages: stages_list,
  variables: vars.global,
} + jobs

View File

@ -0,0 +1,155 @@
# Generated from .gitlab-ci.jsonnet
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
---
container-base-build:
before_script:
- docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
image: docker:git
only:
- schedules
- tags
script:
- 'docker build --build-arg sshkey=$OPERATORCLENT_RSA_B64 --no-cache -f base.Dockerfile -t quay.io/coreos/olm-ci:base . '
- docker push quay.io/coreos/olm-ci:base
stage: docker_base
tags:
- kubernetes
variables:
DOCKER_DRIVER: overlay2
DOCKER_HOST: tcp://docker-host.gitlab.svc.cluster.local:2375
container-build:
before_script:
- docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
- mkdir -p $PWD/bin
image: docker:git
only:
- master
- tags
script:
- docker build -f upstream.Dockerfile .
- "docker tag $(docker images --filter 'label=stage=builder' --format '{{.CreatedAt}}\t{{.ID}}' | sort -nr | head -n 1 | cut -f2) quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}"
- "docker tag $(docker images --filter 'label=stage=e2e' --format '{{.CreatedAt}}\t{{.ID}}' | sort -nr | head -n 1 | cut -f2) quay.io/coreos/olm-e2e:${CI_COMMIT_REF_SLUG}-${SHA8}"
- "docker tag $(docker images --filter 'label=stage=olm' --format '{{.CreatedAt}}\t{{.ID}}' | sort -nr | head -n 1 | cut -f2) quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}-pre"
- docker push quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}
- docker push quay.io/coreos/olm-e2e:${CI_COMMIT_REF_SLUG}-${SHA8}
- docker push quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}-pre
stage: docker_build
tags:
- kubernetes
variables:
DOCKER_DRIVER: overlay2
DOCKER_HOST: tcp://docker-host.gitlab.svc.cluster.local:2375
container-release:
before_script:
- docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
- mkdir -p $PWD/bin
image: docker:git
only:
- master
script:
- docker pull quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}-pre
- docker tag quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}-pre quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}
- docker push quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}
- docker pull quay.io/coreos/olm-e2e:${CI_COMMIT_REF_SLUG}-${SHA8}
- docker tag quay.io/coreos/olm-e2e:${CI_COMMIT_REF_SLUG}-${SHA8} quay.io/coreos/olm-e2e:latest
- docker push quay.io/coreos/olm-e2e:latest
stage: docker_release
tags:
- kubernetes
variables:
DOCKER_DRIVER: overlay2
DOCKER_HOST: tcp://docker-host.gitlab.svc.cluster.local:2375
deploy-openshift:
before_script:
- 'echo "version: 1.0.0-${CI_COMMIT_REF_SLUG}-pre" >> deploy/chart/Chart.yaml'
- 'echo "{\"catalog.image.ref\": \"quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}\", \"catalog_namespace\": \"operator-lifecycle-manager\", \"namespace\": \"operator-lifecycle-manager\", \"olm.image.ref\":
\"quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}\", \"package.image.ref\": \"quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}\", \"watchedNamespaces\": \"\"}" > params.json'
- cat params.json
environment:
name: openshift
url: https://console.apps.ui-preserve.origin-gce.dev.openshift.com
image: quay.io/coreos/alm-ci-build:latest
only:
- master
script:
- echo $OPENSHIFT_KUBECONFIG | base64 -d > kubeconfig
- export KUBECONFIG=./kubeconfig
- charttmpdir=`mktemp -d 2>/dev/null || mktemp -d -t 'charttmpdir'`;mkdir -p ${charttmpdir};helm template -n olm --set namespace=operator-lifecycle-manager deploy/chart --set catalog.image.ref=quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}
--set catalog_namespace=operator-lifecycle-manager --set namespace=operator-lifecycle-manager --set olm.image.ref=quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8} --set package.image.ref=quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}
--set watchedNamespaces= --output-dir ${charttmpdir};chartfilenames=$(ls ${charttmpdir}/olm/templates/*.yaml);echo ${chartfilenames};for f in ${chartfilenames};do if [[ $f == *.configmap.yaml ]];then
kubectl replace --force -f ${f};else kubectl apply -f ${f};fi;done;
- kubectl create secret docker-registry coreos-pull-secret --docker-server quay.io --docker-username $DOCKER_USER --docker-password $DOCKER_PASS --docker-email ignored@example.com --namespace=operator-lifecycle-manager
|| true
- kubectl rollout status -w deployment/olm-operator --namespace=operator-lifecycle-manager
- kubectl rollout status -w deployment/catalog-operator --namespace=operator-lifecycle-manager
- 'curl -X POST --data-urlencode "payload={\"text\": \"New OLM Operator quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA} deployed to ${OPENSHIFT_HOST}/k8s/ns/operator-lifecycle-manager/deployments/alm-operator\"}"
${TEAMUI_SLACK_URL}'
stage: deploy_staging
tags:
- kubernetes
variables:
ALM_DOMAIN: console.apps.ui-preserve.origin-gce.dev.openshift.com
K8S_NAMESPACE: operator-lifecycle-manager
deploy-staging:
before_script:
- 'echo "version: 1.0.0-${CI_COMMIT_REF_SLUG}-pre" >> deploy/chart/Chart.yaml'
- 'echo "{\"catalog.image.ref\": \"quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}\", \"catalog_namespace\": \"ci-alm-staging\", \"namespace\": \"ci-alm-staging\", \"olm.image.ref\": \"quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}\",
\"package.image.ref\": \"quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}\", \"watchedNamespaces\": \"ci-alm-staging\"}" > params.json'
- cat params.json
environment:
name: staging
url: https://alm-staging.k8s.devtable.com
image: quay.io/coreos/alm-ci-build:latest
only:
- master
script:
- echo $CD_KUBECONFIG | base64 -d > kubeconfig
- export KUBECONFIG=./kubeconfig
- charttmpdir=`mktemp -d 2>/dev/null || mktemp -d -t 'charttmpdir'`;mkdir -p ${charttmpdir};helm template -n olm --set namespace=ci-alm-staging deploy/chart --set catalog.image.ref=quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}
--set catalog_namespace=ci-alm-staging --set namespace=ci-alm-staging --set olm.image.ref=quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8} --set package.image.ref=quay.io/coreos/olm:${CI_COMMIT_REF_SLUG}-${SHA8}
--set watchedNamespaces=ci-alm-staging --output-dir ${charttmpdir};chartfilenames=$(ls ${charttmpdir}/olm/templates/*.yaml);echo ${chartfilenames};for f in ${chartfilenames};do if [[ $f == *.configmap.yaml
]];then kubectl replace --force -f ${f};else kubectl apply -f ${f};fi;done;
- kubectl create secret docker-registry coreos-pull-secret --docker-server quay.io --docker-username $DOCKER_USER --docker-password $DOCKER_PASS --docker-email ignored@example.com --namespace=ci-alm-staging
|| true
- kubectl rollout status -w deployment/olm-operator --namespace=ci-alm-staging
- kubectl rollout status -w deployment/catalog-operator --namespace=ci-alm-staging
stage: deploy_staging
tags:
- kubernetes
variables:
ALM_DOMAIN: alm-staging.k8s.devtable.com
K8S_NAMESPACE: ci-alm-staging
stages:
- docker_base
- docker_build
- deploy_preview
- test_setup
- tests
- test_teardown
- integration
- docker_release
- deploy_staging
- teardown
tag-release:
before_script:
- docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
- mkdir -p $PWD/bin
image: docker:git
only:
- tags
script:
- docker pull quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}-pre
- docker tag quay.io/coreos/olm-ci:${CI_COMMIT_REF_SLUG}-pre quay.io/coreos/olm:${CI_COMMIT_TAG}
- docker push quay.io/coreos/olm:${CI_COMMIT_TAG}
- docker pull quay.io/coreos/olm-e2e:${CI_COMMIT_REF_SLUG}-${SHA8}
- docker tag quay.io/coreos/olm-e2e:${CI_COMMIT_REF_SLUG}-${SHA8} quay.io/coreos/olm-e2e:latest
- docker push quay.io/coreos/olm-e2e:latest
stage: docker_release
tags:
- kubernetes
variables:
DOCKER_DRIVER: overlay2
DOCKER_HOST: tcp://docker-host.gitlab.svc.cluster.local:2375
variables:
FAILFASTCI_NAMESPACE: operator-framework
GET_SOURCES_ATTEMPTS: '10'

View File

@ -0,0 +1,91 @@
// Base job templates shared by the CI pipeline definition
// (.gitlab-ci.jsonnet extends these with per-job fields).
local utils = import 'utils.libsonnet';
local vars = import 'vars.libsonnet';
local k8s = utils.k8s;
local helm = utils.helm;
local docker = utils.docker;
local appr = utils.appr;

{
  // All jobs run on the "kubernetes" GitLab runner tag.
  local job_tags = { tags: ["kubernetes"] },

  dockerBuild: {
    // base job to manage containers (build / push)
    image: "docker:git",
    variables: {
      DOCKER_DRIVER: "overlay2",
      DOCKER_HOST: "tcp://docker-host.gitlab.svc.cluster.local:2375",
    },
    before_script: [
      "docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io",
    ],
  } + job_tags,

  // Deploy renders the helm chart, applies it, creates the pull secret,
  // and waits for the olm/catalog operator deployments to roll out.
  // Consumers override fields of `localvars` to target a cluster/namespace.
  Deploy: {
    local this = self,
    local _vars = self.localvars,
    localvars:: {
      appversion: "1.0.0-%s" % self.image.olm.tag,
      apprepo: "quay.io/coreos/olm-ci-app",
      appname: self.namespace,
      chart: "deploy/chart",
      app: "%s@%s" % [self.apprepo, self.appversion],
      domain: "olm-%s.k8s.devtable.com" % "${CI_COMMIT_REF_SLUG}",
      namespace: "ci-olm-%s" % "${CI_COMMIT_REF_SLUG}",
      image: vars.images.prerelease,
      channel: null,
      helm_opts: [],
      kubeconfig: "$CD_KUBECONFIG",
      // Values passed to `helm template` as --set options.
      params: {
        "olm.image.ref": _vars.image.olm.name,
        "catalog.image.ref": _vars.image.olm.name,
        "package.image.ref": _vars.image.olm.name,
        watchedNamespaces: _vars.namespace,
        catalog_namespace: _vars.namespace,
        namespace: _vars.namespace,
      },
    },
    variables: {
      K8S_NAMESPACE: _vars.namespace,
      ALM_DOMAIN: _vars.domain,
    },
    image: "quay.io/coreos/alm-ci-build:latest",
    environment: {
      name: "review/%s" % _vars.appname,
      url: "https://%s" % _vars.domain,
    },
    before_script: [
      'echo "version: 1.0.0-${CI_COMMIT_REF_SLUG}-pre" >> %s/Chart.yaml' % _vars.chart,
      'echo %s > params.json' % std.escapeStringJson(_vars.params),
      "cat params.json",
    ],
    script:
      k8s.setKubeConfig(_vars.kubeconfig) +
      helm.templateApply("olm", _vars.chart, _vars.namespace, _vars.params) +
      k8s.createPullSecret("coreos-pull-secret",
                           _vars.namespace,
                           "quay.io",
                           "$DOCKER_USER",
                           "$DOCKER_PASS") +
      k8s.waitForDeployment("olm-operator", _vars.namespace) +
      k8s.waitForDeployment("catalog-operator", _vars.namespace)
  } + job_tags,

  // DeployStop tears down a preview environment created by Deploy
  // (GitLab "stop" environment action).
  DeployStop: self.Deploy {
    variables+: { GIT_STRATEGY: "none" },
    environment+: {
      action: "stop",
    },
    before_script: [],
    script:
      k8s.setKubeConfig(self.localvars.kubeconfig) + [
        "kubectl delete apiservice v1alpha1.packages.apps.redhat.com --ignore-not-found=true",
        "kubectl delete ns --ignore-not-found=true %s" % self.localvars.namespace,
        "kubectl get pods -o wide -n %s" % self.localvars.namespace,
      ],
  } + job_tags,
}

View File

@ -0,0 +1,239 @@
// General-purpose jsonnet helpers plus command-list builders for docker,
// helm, appr, kubectl, and GitLab-CI job composition.
{
  local topSelf = self,

  # Generate a sequence array from 1 to i
  seq(i):: (
    [x for x in std.range(1, i)]
  ),

  # Fields of obj that are hidden (::) only.
  objectFieldsHidden(obj):: (
    std.setDiff(std.objectFieldsAll(obj), std.objectFields(obj))
  ),

  objectFlatten(obj):: (
    // Merge 1 level dict depth into toplevel
    local visible = {
      [k]: obj[j][k]
      for j in std.objectFieldsAll(obj)
      for k in std.objectFieldsAll(obj[j])
    };
    visible
  ),

  # Drop null entries from an array.
  compact(array):: (
    [x for x in array if x != null]
  ),

  # Values of an object's visible fields.
  objectValues(obj):: (
    local fields = std.objectFields(obj);
    [obj[key] for key in fields]
  ),

  # Apply func to each visible field value, keeping keys.
  objectMap(func, obj):: (
    local fields = std.objectFields(obj);
    { [key]: func(obj[key]) for key in fields }
  ),

  # Upper-case the first character (ASCII lower-case input assumed).
  capitalize(str):: (
    std.char(std.codepoint(str[0]) - 32) + str[1:]
  ),

  test: [
    self.capitalize("test"),
  ],

  # Turn an array into a {name: name} lookup object.
  set(array)::
    { [key]: key for key in array },

  containerName(repo, tag):: "%s:%s" % [repo, tag],

  // Builders returning shell command arrays for docker operations.
  docker: {
    local Docker = self,

    login(server, user, password):: [
      "docker login -u %s -p %s %s" % [user, password, server],
    ],

    # Copy a path out of an image via a temporary container.
    cp(image, src, dest):: [
      "docker create %s | xargs -I{} docker cp {}:%s %s" % [image, src, dest],
    ],

    run(image, cmd, opts=[]):: [
      local optstr = std.join(" ", opts);
      'docker run %s %s %s' % [optstr, image, cmd],
    ],

    build_and_push(image, cache=true, args={}, extra_opts=[]):: (
      Docker.build(image, cache, args, extra_opts) +
      Docker.push(image)
    ),

    # Build one multi-stage Dockerfile, tag each labeled stage, push all tags.
    multibuild_and_push(dockerfile, labelImageMap={}):: (
      Docker.build_file(dockerfile) +
      Docker.tag_from_labels(labelImageMap) +
      Docker.push_all([labelImageMap[label] for label in std.objectFields(labelImageMap)])
    ),

    build_file(dockerfile):: [
      'docker build -f %s .' % [dockerfile],
    ],

    # Tag the newest image carrying each `stage=<label>` label.
    tag_from_labels(labelImageMap={}):: [
      "docker tag $(docker images --filter 'label=stage=%s' --format '{{.CreatedAt}}\t{{.ID}}' | sort -nr | head -n 1 | cut -f2) %s"
      % [label, labelImageMap[label]] for label in std.objectFields(labelImageMap)
    ],

    build(image, cache=true, args={},extra_opts=[]):: [
      // cache may be false (no cache), true, or an image ref to --cache-from.
      local cache_opt = if cache == false
                        then '--no-cache'
                        else if std.type(cache) == 'boolean'
                        then '--no-cache'
                        else '--cache-from=%s' % cache;
      local buildargs_opt = std.join(" ", [
        "--build-arg %s=%s" % [key, args[key]]
        for key in std.objectFields(args)
      ]);
      local opts = std.join(" ", [buildargs_opt, cache_opt] + extra_opts);
      'docker build %s -t %s . ' % [opts, image],
    ],

    push(image):: [
      'docker push %s' % image,
    ],

    push_all(images=[]):: (
      ['docker push %s' % image for image in images]
    ),

    # Pull src, retag as dest, push dest.
    rename(src, dest):: [
      'docker pull %s' % src,
      'docker tag %s %s' % [src, dest],
      'docker push %s' % [dest],
    ],
  },

  helm: {
    # Render a chart to a temp dir and kubectl-apply every manifest;
    # configmaps are force-replaced instead of applied.
    templateApply(name, chartdir, namespace, vars={}):: [
      local set_opts = [
        "--set %s=%s" % [key, vars[key]]
        for key in std.objectFields(vars)
      ];
      std.join(" ", [
        "charttmpdir=`mktemp -d 2>/dev/null || mktemp -d -t 'charttmpdir'`;" +
        "mkdir -p ${charttmpdir};" +
        "helm template -n %s --set namespace=%s %s %s --output-dir ${charttmpdir};" % [name, namespace, chartdir, std.join(" ", set_opts)] +
        "chartfilenames=$(ls ${charttmpdir}/%s/templates/*.yaml);" % name +
        "echo ${chartfilenames};" +
        "for f in ${chartfilenames};" +
        "do "+
        "if [[ $f == *.configmap.yaml ]];" +
        "then kubectl replace --force -f ${f};" +
        "else kubectl apply -f ${f};" +
        "fi;" +
        "done;"
      ]),
    ],

    // uses app-registry
    upgrade(chartdir, appname, namespace="default", vars={}, extra_opts=[]):: [
      local set_opts = [
        "--set %s=%s" % [key, vars[key]]
        for key in std.objectFields(vars)
      ];
      std.join(" ",
               [
                 "helm upgrade %s --force --install %s" % [appname, chartdir],
                 "--namespace=%s" % namespace,
               ] +
               set_opts +
               extra_opts),
    ],
  },

  appr: {
    login(server, user, password):: [
      "appr login -u %s -p %s %s" % [user, password, server],
    ],

    push(name, channel=null, force=false):: [
      std.join(" ",
               ["appr push %s" % name] +
               if channel != null then ["--channel %s" % channel] else [] +
               if force == true then ["-f"] else []),
    ],
  },

  // kubectl command builders; several append "|| true" to be idempotent.
  k8s: {
    setKubeConfig(kubeconfig):: [
      "echo %s | base64 -d > kubeconfig" % kubeconfig,
      "export KUBECONFIG=./kubeconfig",
    ],

    waitForDeployment(deploymentName, namespace):: [
      "kubectl rollout status -w deployment/%s --namespace=%s" % [deploymentName, namespace],
    ],

    createNamespace(name):: [
      "kubectl create ns %s" % name + " || true",
    ],

    createPullSecret(name, namespace, server, user, password):: [
      std.join(" ",
               [
                 "kubectl create secret docker-registry %s" % name,
                 "--docker-server %s" % server,
                 "--docker-username %s" % user,
                 "--docker-password %s" % password,
                 "--docker-email ignored@example.com",
                 "--namespace=%s" % namespace,
                 "|| true",
               ]),
    ],

    get(type, name, namespace, extra_opts=[]):: [
      "kubectl get %s %s -n %s %s" % [
        type,
        name,
        namespace,
        std.join(" ", extra_opts),
      ],
    ],

    apply(filepath, namespace=null, extra_opts=[]):: [
      std.join(
        " ",
        ["kubectl apply -f %s" % filepath] +
        if namespace != null then ["--namespace %s" % namespace] else [] +
        extra_opts
      ),
    ],
  },

  // Helpers for composing GitLab-CI job maps.
  ci: {
    # Merge base_job into every job in `jobs`, optionally forcing a stage.
    mergeJob(base_job, jobs, stage=null):: {
      [job_name]: base_job + jobs[job_name] +
                  if stage != null then { stage: stage } else {}
      for job_name in std.objectFields(jobs)
    },

    only(key):: (
      if key == "master"
      then { only: ['master', 'tags'] }
      else { only: ['branches'] }
    ),

    setManual(key, values):: (
      if std.objectHas(topSelf.set(values), key)
      then { when: 'manual' }
      else { only: ['branches'] }
    ),
  },
}

View File

@ -0,0 +1,74 @@
// CI configuration values for the OLM pipeline, consumed by the
// gitlab-ci jsonnet templates.
local utils = import "utils.libsonnet";
{
  // NOTE(review): "OPERATORCLENT" looks like a typo for "OPERATORCLIENT" —
  // confirm it matches the actual CI secret variable name before changing.
  deploy_keys: { operator_client: "$OPERATORCLENT_RSA_B64" },
  olm_repo: "github.com/operator-framework/operator-lifecycle-manager",
  global: {
    // .gitlab-ci.yaml top `variables` key
    FAILFASTCI_NAMESPACE: "operator-framework",
    // increase attempts to handle occasional auth failures against gitlab.com
    GET_SOURCES_ATTEMPTS: "10",
  },
  paths: {
    olm: {
      // Checkout location of the repo inside the build container.
      src: "$GOPATH/src/%s" % $.olm_repo,
    },
  },
  // internal variables
  images: {
    // Quay initial image, used in the Dockerfile FROM clause
    base: {
      repo: "quay.io/coreos/olm-ci",
      tag: "base",
      name: utils.containerName(self.repo, self.tag),
    },
    // release is a copy of the quayci image to the 'prod' repository
    release: {
      olm: {
        repo: "quay.io/coreos/olm",
        tag: "${CI_COMMIT_REF_SLUG}-${SHA8}",
        name: utils.containerName(self.repo, self.tag),
      },
    },
    // Image tagged from ${CI_COMMIT_TAG}, i.e. built for git-tag pipelines.
    tag: {
      olm: {
        repo: "quay.io/coreos/olm",
        tag: "${CI_COMMIT_TAG}",
        name: utils.containerName(self.repo, self.tag),
      },
    },
    // Per-branch CI image (tagged with the branch ref slug).
    ci: {
      olm: {
        repo: "quay.io/coreos/olm-ci",
        tag: "${CI_COMMIT_REF_SLUG}",
        name: utils.containerName(self.repo, self.tag),
      },
    },
    // End-to-end test image, one per branch+commit …
    e2e: {
      repo: "quay.io/coreos/olm-e2e",
      tag: "${CI_COMMIT_REF_SLUG}-${SHA8}",
      name: utils.containerName(self.repo, self.tag),
    },
    // … plus a floating "latest" tag in the same repository.
    e2elatest: {
      repo: "quay.io/coreos/olm-e2e",
      tag: "latest",
      name: utils.containerName(self.repo, self.tag),
    },
    // CI image with a "-pre" suffix on the branch ref slug.
    prerelease: {
      olm: {
        repo: "quay.io/coreos/olm-ci",
        tag: "${CI_COMMIT_REF_SLUG}-pre",
        name: utils.containerName(self.repo, self.tag),
      },
    },
  },
}

View File

@ -0,0 +1,553 @@
# Change Log
## [0.10.1](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.10.1) (2019-06-05)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.10.0...0.10.1)
**Closed issues:**
- Unable to install the olm [\#882](https://github.com/operator-framework/operator-lifecycle-manager/issues/882)
- Doc for defining x-descriptors [\#808](https://github.com/operator-framework/operator-lifecycle-manager/issues/808)
- Api server backed resources missing permissions [\#804](https://github.com/operator-framework/operator-lifecycle-manager/issues/804)
- Seeing a consistent pattern of warnings installing OLM on OS3 cluster [\#803](https://github.com/operator-framework/operator-lifecycle-manager/issues/803)
- Localhost:9000 can't be accessed [\#800](https://github.com/operator-framework/operator-lifecycle-manager/issues/800)
- Unable to see operators after install/deploy the OML by following the installation guide [\#784](https://github.com/operator-framework/operator-lifecycle-manager/issues/784)
- Pull configmap-operator-registry from quay org "operator-framework" not "operatorframework" in 0.8.1 and master [\#779](https://github.com/operator-framework/operator-lifecycle-manager/issues/779)
- "no matches for kind" error during OLM installation [\#746](https://github.com/operator-framework/operator-lifecycle-manager/issues/746)
- need ability to specify roleRef in permissions [\#732](https://github.com/operator-framework/operator-lifecycle-manager/issues/732)
- none of the deployment works with okd 3.11 [\#715](https://github.com/operator-framework/operator-lifecycle-manager/issues/715)
- Subscription stuck at Upgrading [\#700](https://github.com/operator-framework/operator-lifecycle-manager/issues/700)
- Getting started guide does not work [\#677](https://github.com/operator-framework/operator-lifecycle-manager/issues/677)
- Installation sometimes fails [\#558](https://github.com/operator-framework/operator-lifecycle-manager/issues/558)
- Unhelpful "RequirementsNotMet" message when attempting to deploy Template Service Broker Operator CSV [\#543](https://github.com/operator-framework/operator-lifecycle-manager/issues/543)
**Other changes:**
- Bump release version to 0.10.1 [\#887](https://github.com/operator-framework/operator-lifecycle-manager/pull/887) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Add Internal types for operators.coreos.com API group [\#877](https://github.com/operator-framework/operator-lifecycle-manager/pull/877) ([njhale](https://github.com/njhale))
- Documentation: Fix a few typos [\#876](https://github.com/operator-framework/operator-lifecycle-manager/pull/876) ([marcoderama](https://github.com/marcoderama))
- feat\(install\): Add new install scripts for releases, update release [\#875](https://github.com/operator-framework/operator-lifecycle-manager/pull/875) ([ecordell](https://github.com/ecordell))
- Change Global Catalog Namespace for OpenShift [\#871](https://github.com/operator-framework/operator-lifecycle-manager/pull/871) ([alecmerdler](https://github.com/alecmerdler))
- fix\(operatorgroup\): Remove namespaces requirement for opgroup status [\#869](https://github.com/operator-framework/operator-lifecycle-manager/pull/869) ([dinhxuanvu](https://github.com/dinhxuanvu))
- docs\(release\): add basic steps for making release [\#867](https://github.com/operator-framework/operator-lifecycle-manager/pull/867) ([jpeeler](https://github.com/jpeeler))
- update manifests to 0.10.0 [\#866](https://github.com/operator-framework/operator-lifecycle-manager/pull/866) ([jpeeler](https://github.com/jpeeler))
- chore\(deps\): bump kube deps to 1.14 [\#864](https://github.com/operator-framework/operator-lifecycle-manager/pull/864) ([ecordell](https://github.com/ecordell))
- fix\(catalog\): close grpc connections before deleting them [\#861](https://github.com/operator-framework/operator-lifecycle-manager/pull/861) ([ecordell](https://github.com/ecordell))
## [0.10.0](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.10.0) (2019-05-22)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.9.0...0.10.0)
**Closed issues:**
- Operator lifecycle manager and operator-sdk connection [\#862](https://github.com/operator-framework/operator-lifecycle-manager/issues/862)
- I am getting error while installing OLM [\#854](https://github.com/operator-framework/operator-lifecycle-manager/issues/854)
- Generated Subscriptions should have an owner reference to created Subscription [\#832](https://github.com/operator-framework/operator-lifecycle-manager/issues/832)
- replaces meta data for operator will throw fatal error when no previous version of operator is found. [\#831](https://github.com/operator-framework/operator-lifecycle-manager/issues/831)
- Operator catalog is created but now shown in the OLM UI [\#822](https://github.com/operator-framework/operator-lifecycle-manager/issues/822)
- After it be installed the URL to connect/use it should be informed to the user [\#785](https://github.com/operator-framework/operator-lifecycle-manager/issues/785)
- Add 'operatorgroups' to edit and view clusterroles [\#743](https://github.com/operator-framework/operator-lifecycle-manager/issues/743)
- upstream deployment: olm pod keeps crashing [\#714](https://github.com/operator-framework/operator-lifecycle-manager/issues/714)
**Other changes:**
- bump version to 0.10.0 [\#865](https://github.com/operator-framework/operator-lifecycle-manager/pull/865) ([jpeeler](https://github.com/jpeeler))
- \(refactor\) Move csv set and replace to a package [\#860](https://github.com/operator-framework/operator-lifecycle-manager/pull/860) ([tkashem](https://github.com/tkashem))
- fix\(unit\): Remove leftover println causing excessive log in unit test [\#859](https://github.com/operator-framework/operator-lifecycle-manager/pull/859) ([dinhxuanvu](https://github.com/dinhxuanvu))
- fix\(run\_console\_loca\): Fix command and improve output message [\#858](https://github.com/operator-framework/operator-lifecycle-manager/pull/858) ([camilamacedo86](https://github.com/camilamacedo86))
- test\(olm\): test role aggregation for aggregate apiservices [\#856](https://github.com/operator-framework/operator-lifecycle-manager/pull/856) ([ecordell](https://github.com/ecordell))
- fix\(unit\): TestUpdate no longer flakes [\#855](https://github.com/operator-framework/operator-lifecycle-manager/pull/855) ([ecordell](https://github.com/ecordell))
- chore\(deploy\): fix release scripts [\#852](https://github.com/operator-framework/operator-lifecycle-manager/pull/852) ([ecordell](https://github.com/ecordell))
- fix\(operatorgroup\): No targetNamespaces matched namespace selector [\#851](https://github.com/operator-framework/operator-lifecycle-manager/pull/851) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Bug 1705649: fix olm-operators tolerations [\#850](https://github.com/operator-framework/operator-lifecycle-manager/pull/850) ([ravisantoshgudimetla](https://github.com/ravisantoshgudimetla))
- fix\(deploy\): add missing descriptions to manifests [\#848](https://github.com/operator-framework/operator-lifecycle-manager/pull/848) ([jpeeler](https://github.com/jpeeler))
- fix\(catalog\): fix issue where subscriptions sometimes get "stuck" [\#847](https://github.com/operator-framework/operator-lifecycle-manager/pull/847) ([ecordell](https://github.com/ecordell))
- fix\(deploy\): add missing descriptions [\#845](https://github.com/operator-framework/operator-lifecycle-manager/pull/845) ([jpeeler](https://github.com/jpeeler))
- Add Termination Message to Failing OLM Pods [\#844](https://github.com/operator-framework/operator-lifecycle-manager/pull/844) ([alecmerdler](https://github.com/alecmerdler))
- Fix tolerations [\#843](https://github.com/operator-framework/operator-lifecycle-manager/pull/843) ([ravisantoshgudimetla](https://github.com/ravisantoshgudimetla))
- fix\(catalog\): Fix subscriptions without a sourceNamespace hang forever [\#839](https://github.com/operator-framework/operator-lifecycle-manager/pull/839) ([dinhxuanvu](https://github.com/dinhxuanvu))
- fix\(resolver\): fixes a bug where resolved dependent subscriptions don't [\#838](https://github.com/operator-framework/operator-lifecycle-manager/pull/838) ([ecordell](https://github.com/ecordell))
- Refactor to avoid cache races [\#837](https://github.com/operator-framework/operator-lifecycle-manager/pull/837) ([jpeeler](https://github.com/jpeeler))
- Set limit on length of Status.Conditions of a csv [\#836](https://github.com/operator-framework/operator-lifecycle-manager/pull/836) ([tkashem](https://github.com/tkashem))
- Fix gRPC registry pod recreation [\#835](https://github.com/operator-framework/operator-lifecycle-manager/pull/835) ([njhale](https://github.com/njhale))
- Support semver ranges of versions to skip in the head of a channel [\#834](https://github.com/operator-framework/operator-lifecycle-manager/pull/834) ([ecordell](https://github.com/ecordell))
- test\(e2e\): wait for deployment to exist in csv replacement test [\#833](https://github.com/operator-framework/operator-lifecycle-manager/pull/833) ([ecordell](https://github.com/ecordell))
- Always set LastTransitionTime in OperatorStatusCondition [\#830](https://github.com/operator-framework/operator-lifecycle-manager/pull/830) ([soltysh](https://github.com/soltysh))
- Revert "chore\(cvo\): remove ClusterOperator from manifests" [\#828](https://github.com/operator-framework/operator-lifecycle-manager/pull/828) ([ecordell](https://github.com/ecordell))
- fix\(olm\): Fix the issue with missing events due to rate limit [\#827](https://github.com/operator-framework/operator-lifecycle-manager/pull/827) ([dinhxuanvu](https://github.com/dinhxuanvu))
- chore\(cvo\): remove ClusterOperator from manifests [\#826](https://github.com/operator-framework/operator-lifecycle-manager/pull/826) ([ecordell](https://github.com/ecordell))
- Add node-selector annotation to namespace [\#824](https://github.com/operator-framework/operator-lifecycle-manager/pull/824) ([ravisantoshgudimetla](https://github.com/ravisantoshgudimetla))
- chore\(cvo\): update openshift/api so that OperatorFailing is changed to OperatorDegraded [\#823](https://github.com/operator-framework/operator-lifecycle-manager/pull/823) ([ecordell](https://github.com/ecordell))
- Add validation details for `spec.maturity` field [\#821](https://github.com/operator-framework/operator-lifecycle-manager/pull/821) ([tlwu2013](https://github.com/tlwu2013))
- Subscription Status InstallPlan References [\#820](https://github.com/operator-framework/operator-lifecycle-manager/pull/820) ([njhale](https://github.com/njhale))
- Add priorityClassName [\#817](https://github.com/operator-framework/operator-lifecycle-manager/pull/817) ([jianzhangbjz](https://github.com/jianzhangbjz))
- Update catalog if image changes [\#816](https://github.com/operator-framework/operator-lifecycle-manager/pull/816) ([ecordell](https://github.com/ecordell))
- feat\(packageserver\): add additional info to package server output [\#813](https://github.com/operator-framework/operator-lifecycle-manager/pull/813) ([ecordell](https://github.com/ecordell))
- feat\(rbac\): restrict permissions for namespace admins [\#812](https://github.com/operator-framework/operator-lifecycle-manager/pull/812) ([ecordell](https://github.com/ecordell))
- chore\(cvo\): report progressing=true if the version has updated [\#811](https://github.com/operator-framework/operator-lifecycle-manager/pull/811) ([ecordell](https://github.com/ecordell))
- add logging and separate muxer for metrics [\#809](https://github.com/operator-framework/operator-lifecycle-manager/pull/809) ([jpeeler](https://github.com/jpeeler))
- fix\(catalog\): recreate registry pods when deleted [\#807](https://github.com/operator-framework/operator-lifecycle-manager/pull/807) ([njhale](https://github.com/njhale))
- documentation: Fix a few typos. [\#806](https://github.com/operator-framework/operator-lifecycle-manager/pull/806) ([marcoderama](https://github.com/marcoderama))
- Garbage Collection for OperatorGroup RBAC [\#795](https://github.com/operator-framework/operator-lifecycle-manager/pull/795) ([alecmerdler](https://github.com/alecmerdler))
- fix\(olm\): generate aggregated clusterroles for ownnamespace operatorgroups correctly [\#794](https://github.com/operator-framework/operator-lifecycle-manager/pull/794) ([ecordell](https://github.com/ecordell))
- Fixing indentation for spec field [\#787](https://github.com/operator-framework/operator-lifecycle-manager/pull/787) ([bergerhoffer](https://github.com/bergerhoffer))
- Add support for Windows WSL in run\_console\_local.sh [\#768](https://github.com/operator-framework/operator-lifecycle-manager/pull/768) ([leszko](https://github.com/leszko))
- fix the labels.provider of packagemanifest issue [\#766](https://github.com/operator-framework/operator-lifecycle-manager/pull/766) ([jianzhangbjz](https://github.com/jianzhangbjz))
- fix\(deployment\): Clean up orphaned deployments [\#759](https://github.com/operator-framework/operator-lifecycle-manager/pull/759) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Add Provided APIs to PackageManifest [\#754](https://github.com/operator-framework/operator-lifecycle-manager/pull/754) ([alecmerdler](https://github.com/alecmerdler))
- Fix small typo. [\#751](https://github.com/operator-framework/operator-lifecycle-manager/pull/751) ([lveyde](https://github.com/lveyde))
- fix\(olm\): add deletion monitoring for api services [\#750](https://github.com/operator-framework/operator-lifecycle-manager/pull/750) ([jpeeler](https://github.com/jpeeler))
## [0.9.0](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.9.0) (2019-04-11)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.8.1...0.9.0)
**Implemented enhancements:**
- feat\(packageserver\): support apps.redhat.com and operators.coreos.com [\#788](https://github.com/operator-framework/operator-lifecycle-manager/pull/788) ([njhale](https://github.com/njhale))
- fix\(metrics\): add service monitor config [\#682](https://github.com/operator-framework/operator-lifecycle-manager/pull/682) ([jpeeler](https://github.com/jpeeler))
**Fixed bugs:**
- fix\(annotation\): don't annotate deployments that aren't owned by a CSV [\#792](https://github.com/operator-framework/operator-lifecycle-manager/pull/792) ([ecordell](https://github.com/ecordell))
- fix\(packageserver\): add struct tags to PackageManifestList [\#791](https://github.com/operator-framework/operator-lifecycle-manager/pull/791) ([njhale](https://github.com/njhale))
- fix\(olm\): use hashes for provided api labels [\#778](https://github.com/operator-framework/operator-lifecycle-manager/pull/778) ([ecordell](https://github.com/ecordell))
**Closed issues:**
- git add issue [\#797](https://github.com/operator-framework/operator-lifecycle-manager/issues/797)
- Unable to install the latest version on minishift [\#780](https://github.com/operator-framework/operator-lifecycle-manager/issues/780)
- scripts/install\_local.sh: upstream-operators.catalogsource.yaml not written correctly [\#772](https://github.com/operator-framework/operator-lifecycle-manager/issues/772)
- flag provided but not defined: -writeStatusName [\#770](https://github.com/operator-framework/operator-lifecycle-manager/issues/770)
- ClusterServiceVersion Status: Failed [\#769](https://github.com/operator-framework/operator-lifecycle-manager/issues/769)
- Upstream quickstart and latest manifests should deploy same OLM image [\#747](https://github.com/operator-framework/operator-lifecycle-manager/issues/747)
**Other changes:**
- Fix modules [\#805](https://github.com/operator-framework/operator-lifecycle-manager/pull/805) ([njhale](https://github.com/njhale))
- Cut Release 0.9.0 [\#802](https://github.com/operator-framework/operator-lifecycle-manager/pull/802) ([alecmerdler](https://github.com/alecmerdler))
- Bump OLM Release Version to 0.9.0 [\#801](https://github.com/operator-framework/operator-lifecycle-manager/pull/801) ([alecmerdler](https://github.com/alecmerdler))
- Update Kubernetes dependencies to 1.12.7 [\#793](https://github.com/operator-framework/operator-lifecycle-manager/pull/793) ([jpeeler](https://github.com/jpeeler))
- chore\(build\): build binaries locally and load them into containers [\#777](https://github.com/operator-framework/operator-lifecycle-manager/pull/777) ([ecordell](https://github.com/ecordell))
- chore\(catalog\): add ClusterOperator status for catalog operator [\#776](https://github.com/operator-framework/operator-lifecycle-manager/pull/776) ([ecordell](https://github.com/ecordell))
- chore\(deploy\): set priorityclass on olm pods [\#775](https://github.com/operator-framework/operator-lifecycle-manager/pull/775) ([ecordell](https://github.com/ecordell))
- chore\(api\): bump operatorgroup and packagemanifest to v1 [\#774](https://github.com/operator-framework/operator-lifecycle-manager/pull/774) ([ecordell](https://github.com/ecordell))
- chore\(manifests\): recut 0.8.1 [\#771](https://github.com/operator-framework/operator-lifecycle-manager/pull/771) ([njhale](https://github.com/njhale))
- docs\(subscriptions\): add improved sub status proposal [\#741](https://github.com/operator-framework/operator-lifecycle-manager/pull/741) ([njhale](https://github.com/njhale))
## [0.8.1](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.8.1) (2019-03-20)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.7.1...0.8.1)
**Implemented enhancements:**
- Grant namespace list to global operators [\#764](https://github.com/operator-framework/operator-lifecycle-manager/pull/764) ([ecordell](https://github.com/ecordell))
- feat\(csv\): requeue csvs on change to CRDs [\#763](https://github.com/operator-framework/operator-lifecycle-manager/pull/763) ([njhale](https://github.com/njhale))
- Add upstream catalog [\#762](https://github.com/operator-framework/operator-lifecycle-manager/pull/762) ([ecordell](https://github.com/ecordell))
- refactor\(images\): use operator-framework namespace for all olm images [\#752](https://github.com/operator-framework/operator-lifecycle-manager/pull/752) ([njhale](https://github.com/njhale))
- clusteroperator: Report when OLM reaches "level" and check syncs [\#748](https://github.com/operator-framework/operator-lifecycle-manager/pull/748) ([smarterclayton](https://github.com/smarterclayton))
- feat\(test\): adds CSV phase reporting for package server [\#745](https://github.com/operator-framework/operator-lifecycle-manager/pull/745) ([jpeeler](https://github.com/jpeeler))
- Add minKubeVersion validation to templates [\#739](https://github.com/operator-framework/operator-lifecycle-manager/pull/739) ([dinhxuanvu](https://github.com/dinhxuanvu))
- OperatorGroup expansion/contraction [\#736](https://github.com/operator-framework/operator-lifecycle-manager/pull/736) ([ecordell](https://github.com/ecordell))
- docs\(operatorgroups\): add more extensive docs [\#721](https://github.com/operator-framework/operator-lifecycle-manager/pull/721) ([njhale](https://github.com/njhale))
- add all-in-one yaml and helper script [\#720](https://github.com/operator-framework/operator-lifecycle-manager/pull/720) ([dmesser](https://github.com/dmesser))
- feat\(csv\): allow use verb in permissions [\#717](https://github.com/operator-framework/operator-lifecycle-manager/pull/717) ([ecordell](https://github.com/ecordell))
- Add Graceful Shutdown to PackageManifest Server [\#711](https://github.com/operator-framework/operator-lifecycle-manager/pull/711) ([alecmerdler](https://github.com/alecmerdler))
- feat\(catalogsource\): allow grpc source types that don't require an image [\#709](https://github.com/operator-framework/operator-lifecycle-manager/pull/709) ([njhale](https://github.com/njhale))
- remove minishift support [\#704](https://github.com/operator-framework/operator-lifecycle-manager/pull/704) ([leseb](https://github.com/leseb))
- fix\(packagemanifest\): Add InstallModes info from CSV to PackageManifest [\#697](https://github.com/operator-framework/operator-lifecycle-manager/pull/697) ([dinhxuanvu](https://github.com/dinhxuanvu))
- fix\(installplans\): add ability to apply Services [\#674](https://github.com/operator-framework/operator-lifecycle-manager/pull/674) ([njhale](https://github.com/njhale))
- test\(validation\): add test to verify OLM can use bundles with CRDs with min/max fields [\#672](https://github.com/operator-framework/operator-lifecycle-manager/pull/672) ([njhale](https://github.com/njhale))
- test\(catalog\): add e2e test to verify configmap changes are detected [\#670](https://github.com/operator-framework/operator-lifecycle-manager/pull/670) ([ecordell](https://github.com/ecordell))
- fix\(operatorgroups\): write out CSV status on OperatorGroup issues [\#669](https://github.com/operator-framework/operator-lifecycle-manager/pull/669) ([njhale](https://github.com/njhale))
- Add minimum kube version to CSV & check it against server version [\#663](https://github.com/operator-framework/operator-lifecycle-manager/pull/663) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Watch all namespaces in local setup by removing watchedNamespaces in local-values [\#657](https://github.com/operator-framework/operator-lifecycle-manager/pull/657) ([chancez](https://github.com/chancez))
- Write cluster operator status after successful startup \(attempt \#2\) [\#652](https://github.com/operator-framework/operator-lifecycle-manager/pull/652) ([jpeeler](https://github.com/jpeeler))
- Consume Operator Registry from PackageManifest Server [\#650](https://github.com/operator-framework/operator-lifecycle-manager/pull/650) ([njhale](https://github.com/njhale))
- update codegen for 2019 [\#646](https://github.com/operator-framework/operator-lifecycle-manager/pull/646) ([jcantrill](https://github.com/jcantrill))
- test\(catalog\): add an e2e test verifying that the rh-operators catalog starts [\#643](https://github.com/operator-framework/operator-lifecycle-manager/pull/643) ([ecordell](https://github.com/ecordell))
- feat\(csv\): add installmodes to CSV spec [\#640](https://github.com/operator-framework/operator-lifecycle-manager/pull/640) ([njhale](https://github.com/njhale))
- feat\(resolver\): take all subscriptions into account when resolving [\#638](https://github.com/operator-framework/operator-lifecycle-manager/pull/638) ([ecordell](https://github.com/ecordell))
- Requeue subscriptions on catalogsource change [\#633](https://github.com/operator-framework/operator-lifecycle-manager/pull/633) ([ecordell](https://github.com/ecordell))
- Add view permissions for CRDs on provided APIs [\#618](https://github.com/operator-framework/operator-lifecycle-manager/pull/618) ([ecordell](https://github.com/ecordell))
- Require pluralname for APIServiceDefinitions in CSVs [\#617](https://github.com/operator-framework/operator-lifecycle-manager/pull/617) ([ecordell](https://github.com/ecordell))
- Verify CRD's condition to ensure it's registered with k8s API \(rebased\) [\#614](https://github.com/operator-framework/operator-lifecycle-manager/pull/614) ([jpeeler](https://github.com/jpeeler))
- chore\(release\): switch image-reference tag to operator-lifecycle-manager [\#612](https://github.com/operator-framework/operator-lifecycle-manager/pull/612) ([ecordell](https://github.com/ecordell))
- feat\(package-server\): create subscription manifest [\#609](https://github.com/operator-framework/operator-lifecycle-manager/pull/609) ([njhale](https://github.com/njhale))
- use quay.io vs registry.reg-aws.openshift.com [\#608](https://github.com/operator-framework/operator-lifecycle-manager/pull/608) ([jboyd01](https://github.com/jboyd01))
- feat\(ci\): remove e2e tests from gitlab [\#602](https://github.com/operator-framework/operator-lifecycle-manager/pull/602) ([ecordell](https://github.com/ecordell))
- feat\(package-server\): self-host package-server with CSV [\#594](https://github.com/operator-framework/operator-lifecycle-manager/pull/594) ([njhale](https://github.com/njhale))
- test\(csv\): use mock-ext-server for owned apiservice e2e test [\#593](https://github.com/operator-framework/operator-lifecycle-manager/pull/593) ([njhale](https://github.com/njhale))
- Add operator namespace [\#591](https://github.com/operator-framework/operator-lifecycle-manager/pull/591) ([jpeeler](https://github.com/jpeeler))
- Don't process CSVs without operatorgroup [\#589](https://github.com/operator-framework/operator-lifecycle-manager/pull/589) ([ecordell](https://github.com/ecordell))
- Adding description to package manifest object [\#587](https://github.com/operator-framework/operator-lifecycle-manager/pull/587) ([galletti94](https://github.com/galletti94))
- Propagate Labels from `CatalogSource` to `PackageManifests` [\#576](https://github.com/operator-framework/operator-lifecycle-manager/pull/576) ([alecmerdler](https://github.com/alecmerdler))
- Make use a valid clusterPermissions rule verb [\#575](https://github.com/operator-framework/operator-lifecycle-manager/pull/575) ([wongma7](https://github.com/wongma7))
- Create role bindings for operator service accounts [\#571](https://github.com/operator-framework/operator-lifecycle-manager/pull/571) ([ecordell](https://github.com/ecordell))
- feat\(olm\): use cache listers in olm-operator [\#569](https://github.com/operator-framework/operator-lifecycle-manager/pull/569) ([njhale](https://github.com/njhale))
- Operator group follow ups [\#568](https://github.com/operator-framework/operator-lifecycle-manager/pull/568) ([jpeeler](https://github.com/jpeeler))
- feat\(mocks\): generate fakes and mocks in a container [\#564](https://github.com/operator-framework/operator-lifecycle-manager/pull/564) ([njhale](https://github.com/njhale))
- Remove ns annotator [\#563](https://github.com/operator-framework/operator-lifecycle-manager/pull/563) ([ecordell](https://github.com/ecordell))
- Create registry pods for ConfigMap CatalogSources [\#556](https://github.com/operator-framework/operator-lifecycle-manager/pull/556) ([ecordell](https://github.com/ecordell))
- Switch to go modules [\#554](https://github.com/operator-framework/operator-lifecycle-manager/pull/554) ([ecordell](https://github.com/ecordell))
- feat\(make\): add e2e command for running from ci-operator [\#552](https://github.com/operator-framework/operator-lifecycle-manager/pull/552) ([ecordell](https://github.com/ecordell))
- Add test case for supporting multi-version CRD [\#548](https://github.com/operator-framework/operator-lifecycle-manager/pull/548) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Verify Native APIs Present for ClusterServiceVersion [\#541](https://github.com/operator-framework/operator-lifecycle-manager/pull/541) ([alecmerdler](https://github.com/alecmerdler))
- feat\(csv\): detect req and dep change in succeeded/failed phases [\#536](https://github.com/operator-framework/operator-lifecycle-manager/pull/536) ([njhale](https://github.com/njhale))
- Add `assign` verb to csv crd [\#531](https://github.com/operator-framework/operator-lifecycle-manager/pull/531) ([eriknelson](https://github.com/eriknelson))
- Emit Kubernetes Events for ClusterServiceVersion [\#530](https://github.com/operator-framework/operator-lifecycle-manager/pull/530) ([alecmerdler](https://github.com/alecmerdler))
- feat\(csv\): add cert rotation for owned APIServices [\#525](https://github.com/operator-framework/operator-lifecycle-manager/pull/525) ([njhale](https://github.com/njhale))
- Pass Annotations to PackageManifests [\#521](https://github.com/operator-framework/operator-lifecycle-manager/pull/521) ([alecmerdler](https://github.com/alecmerdler))
- feat\(deploy\): add debug variable to all deployments [\#518](https://github.com/operator-framework/operator-lifecycle-manager/pull/518) ([jpeeler](https://github.com/jpeeler))
- feat\(build\): allow skipping minikube during local builds [\#516](https://github.com/operator-framework/operator-lifecycle-manager/pull/516) ([jpeeler](https://github.com/jpeeler))
- Add possible nonResourceURL verbs to validation [\#511](https://github.com/operator-framework/operator-lifecycle-manager/pull/511) ([eriknelson](https://github.com/eriknelson))
- Add CatalogSource-Specific Fields to PackageManifest [\#505](https://github.com/operator-framework/operator-lifecycle-manager/pull/505) ([alecmerdler](https://github.com/alecmerdler))
- Add Runlevels + docker labels [\#502](https://github.com/operator-framework/operator-lifecycle-manager/pull/502) ([ecordell](https://github.com/ecordell))
- Make use a valid clusterPermissions rule verb [\#499](https://github.com/operator-framework/operator-lifecycle-manager/pull/499) ([wongma7](https://github.com/wongma7))
- Always Return Global `PackageManifests` [\#494](https://github.com/operator-framework/operator-lifecycle-manager/pull/494) ([alecmerdler](https://github.com/alecmerdler))
- refine cluster-admin OLM workflow [\#482](https://github.com/operator-framework/operator-lifecycle-manager/pull/482) ([madorn](https://github.com/madorn))
- add OperatorGroup [\#480](https://github.com/operator-framework/operator-lifecycle-manager/pull/480) ([jpeeler](https://github.com/jpeeler))
- feat\(olm\): add RBAC requirements check for CSVs [\#479](https://github.com/operator-framework/operator-lifecycle-manager/pull/479) ([njhale](https://github.com/njhale))
- Allow resolving ClusterRoles [\#477](https://github.com/operator-framework/operator-lifecycle-manager/pull/477) ([ecordell](https://github.com/ecordell))
- Implement Watch for `PackageManifest` API [\#476](https://github.com/operator-framework/operator-lifecycle-manager/pull/476) ([alecmerdler](https://github.com/alecmerdler))
**Fixed bugs:**
- OLM Unable to Upgrade Through Multiple Versions [\#755](https://github.com/operator-framework/operator-lifecycle-manager/issues/755)
- Subscription steps through multiple upgrades [\#761](https://github.com/operator-framework/operator-lifecycle-manager/pull/761) ([ecordell](https://github.com/ecordell))
- Fix typo - k8s version format [\#760](https://github.com/operator-framework/operator-lifecycle-manager/pull/760) ([tlwu2013](https://github.com/tlwu2013))
- fix\(csv\): properly detect apiservice and crd conflicts [\#758](https://github.com/operator-framework/operator-lifecycle-manager/pull/758) ([njhale](https://github.com/njhale))
- Emit `InstallSucceeded` Event for CSV [\#749](https://github.com/operator-framework/operator-lifecycle-manager/pull/749) ([alecmerdler](https://github.com/alecmerdler))
- fix\(packageserver\): set packageserver to IfNotPresent [\#738](https://github.com/operator-framework/operator-lifecycle-manager/pull/738) ([ecordell](https://github.com/ecordell))
- fix\(installmodes\): update support logic to match expected behavior [\#733](https://github.com/operator-framework/operator-lifecycle-manager/pull/733) ([njhale](https://github.com/njhale))
- fix\(packageserver\): add missing name field to api definition [\#731](https://github.com/operator-framework/operator-lifecycle-manager/pull/731) ([ecordell](https://github.com/ecordell))
- fix\(owners\): remove cross-namespace and cluster-\>namespace ownerrefs [\#729](https://github.com/operator-framework/operator-lifecycle-manager/pull/729) ([ecordell](https://github.com/ecordell))
- fix\(csv\): remove regex on base64 image data [\#723](https://github.com/operator-framework/operator-lifecycle-manager/pull/723) ([ecordell](https://github.com/ecordell))
- fix\(ocp\): add csv and deployment for package server [\#722](https://github.com/operator-framework/operator-lifecycle-manager/pull/722) ([jpeeler](https://github.com/jpeeler))
- Fix API Validation for `OperatorGroup` Spec [\#716](https://github.com/operator-framework/operator-lifecycle-manager/pull/716) ([alecmerdler](https://github.com/alecmerdler))
- fix\(manifests\): start manifest file names with 0000\_50\_olm\_\* [\#712](https://github.com/operator-framework/operator-lifecycle-manager/pull/712) ([njhale](https://github.com/njhale))
- fix\(olm\): properly detect cluster operator API [\#710](https://github.com/operator-framework/operator-lifecycle-manager/pull/710) ([jpeeler](https://github.com/jpeeler))
- fix\(registry-pods\): add everything toleration to registry pods [\#708](https://github.com/operator-framework/operator-lifecycle-manager/pull/708) ([njhale](https://github.com/njhale))
- Make e2e more robust [\#703](https://github.com/operator-framework/operator-lifecycle-manager/pull/703) ([jpeeler](https://github.com/jpeeler))
- fix\(deploy\): Add a component prefix to manifests [\#702](https://github.com/operator-framework/operator-lifecycle-manager/pull/702) ([smarterclayton](https://github.com/smarterclayton))
- fix\(csv\): only allow one CSV per provided API across intersecting operatorgroups [\#701](https://github.com/operator-framework/operator-lifecycle-manager/pull/701) ([njhale](https://github.com/njhale))
- fix\(olm\): Remove the "v" prefix in minKubeVersion if present [\#699](https://github.com/operator-framework/operator-lifecycle-manager/pull/699) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Fix README.md links for CRDs, Descriptor, and Package [\#695](https://github.com/operator-framework/operator-lifecycle-manager/pull/695) ([ron1](https://github.com/ron1))
- fix\(olm\): Fix CSVs api-servers battle for ownership of APIServices [\#690](https://github.com/operator-framework/operator-lifecycle-manager/pull/690) ([dinhxuanvu](https://github.com/dinhxuanvu))
- fix\(subscriptions\): fix race between subscription sync and cache [\#689](https://github.com/operator-framework/operator-lifecycle-manager/pull/689) ([njhale](https://github.com/njhale))
- fix\(reconciler\): set command in pod spec of registry images [\#688](https://github.com/operator-framework/operator-lifecycle-manager/pull/688) ([ecordell](https://github.com/ecordell))
- fix\(permissions\): Generate unique Names for permissions [\#687](https://github.com/operator-framework/operator-lifecycle-manager/pull/687) ([ecordell](https://github.com/ecordell))
- Correct URL for Package Server CSV Link [\#685](https://github.com/operator-framework/operator-lifecycle-manager/pull/685) ([alecmerdler](https://github.com/alecmerdler))
- Ensure Owner References on ConfigMaps for CatalogSources [\#681](https://github.com/operator-framework/operator-lifecycle-manager/pull/681) ([alecmerdler](https://github.com/alecmerdler))
- fix\(cm-reconciler\): query for pods to overwrite by CatalogSource label [\#680](https://github.com/operator-framework/operator-lifecycle-manager/pull/680) ([njhale](https://github.com/njhale))
- fix\(installplan\): fix bug where too many installplans can be created [\#679](https://github.com/operator-framework/operator-lifecycle-manager/pull/679) ([ecordell](https://github.com/ecordell))
- fix\(subscriptions\): respect startingCSV [\#676](https://github.com/operator-framework/operator-lifecycle-manager/pull/676) ([njhale](https://github.com/njhale))
- Add view role verbs to admin/edit role aggregation. [\#673](https://github.com/operator-framework/operator-lifecycle-manager/pull/673) ([cliles](https://github.com/cliles))
- add view rbac to 'admin' and 'edit' default roles [\#671](https://github.com/operator-framework/operator-lifecycle-manager/pull/671) ([aweiteka](https://github.com/aweiteka))
- fix\(packageserver\): don't error out when listing [\#667](https://github.com/operator-framework/operator-lifecycle-manager/pull/667) ([njhale](https://github.com/njhale))
- fix\(operatorgroups\): use copied csv for update status [\#665](https://github.com/operator-framework/operator-lifecycle-manager/pull/665) ([njhale](https://github.com/njhale))
- fix\(deploy\): add linux nodeselector [\#653](https://github.com/operator-framework/operator-lifecycle-manager/pull/653) ([ecordell](https://github.com/ecordell))
- fix\(metrics\): remove resources that aren't updated [\#637](https://github.com/operator-framework/operator-lifecycle-manager/pull/637) ([jpeeler](https://github.com/jpeeler))
- fix\(crds\): remove category all from CRDs [\#636](https://github.com/operator-framework/operator-lifecycle-manager/pull/636) ([ecordell](https://github.com/ecordell))
- fix\(charts\): add operator\_namespace to run-local values [\#634](https://github.com/operator-framework/operator-lifecycle-manager/pull/634) ([njhale](https://github.com/njhale))
- fix\(deploy\): add runlevel to openshift-operators [\#620](https://github.com/operator-framework/operator-lifecycle-manager/pull/620) ([ecordell](https://github.com/ecordell))
- fix\(olm\): don't annotate target namespace on copied CSVs [\#616](https://github.com/operator-framework/operator-lifecycle-manager/pull/616) ([jpeeler](https://github.com/jpeeler))
- fix\(e2e\): make operator group test more robust [\#603](https://github.com/operator-framework/operator-lifecycle-manager/pull/603) ([jpeeler](https://github.com/jpeeler))
- fix\(e2e\): fix CSV tests [\#599](https://github.com/operator-framework/operator-lifecycle-manager/pull/599) ([njhale](https://github.com/njhale))
- Fix a typo in documentation [\#596](https://github.com/operator-framework/operator-lifecycle-manager/pull/596) ([smanpathak](https://github.com/smanpathak))
- Fix `client-go` Dependency [\#592](https://github.com/operator-framework/operator-lifecycle-manager/pull/592) ([alecmerdler](https://github.com/alecmerdler))
- Fix Panic in PackageManifest Server [\#590](https://github.com/operator-framework/operator-lifecycle-manager/pull/590) ([alecmerdler](https://github.com/alecmerdler))
- fix\(annotations\): merge CSV and pod template annotations when installing deployments [\#585](https://github.com/operator-framework/operator-lifecycle-manager/pull/585) ([njhale](https://github.com/njhale))
- Add `packagemanifest:aggregated-apiserver-clusterrole` [\#583](https://github.com/operator-framework/operator-lifecycle-manager/pull/583) ([alecmerdler](https://github.com/alecmerdler))
- fix\(csv-requeue\): requeue on namespace all if all namespaces are watched [\#572](https://github.com/operator-framework/operator-lifecycle-manager/pull/572) ([njhale](https://github.com/njhale))
- Fix Go Modules [\#561](https://github.com/operator-framework/operator-lifecycle-manager/pull/561) ([alecmerdler](https://github.com/alecmerdler))
- fix\(queueInformers\): use separate queue for each namespace [\#560](https://github.com/operator-framework/operator-lifecycle-manager/pull/560) ([njhale](https://github.com/njhale))
- fix\(olm\): set lister up for deployments [\#550](https://github.com/operator-framework/operator-lifecycle-manager/pull/550) ([jpeeler](https://github.com/jpeeler))
- Fix olm pod name so log to be written correctly to olm.log [\#549](https://github.com/operator-framework/operator-lifecycle-manager/pull/549) ([dinhxuanvu](https://github.com/dinhxuanvu))
- Fix template values for 'olm' parameters [\#537](https://github.com/operator-framework/operator-lifecycle-manager/pull/537) ([rhuss](https://github.com/rhuss))
- fix\(e2e\): switch to port 5443 for owned apiservice test [\#527](https://github.com/operator-framework/operator-lifecycle-manager/pull/527) ([njhale](https://github.com/njhale))
- fix\(package-server\): make secure port configurable and default to 5443 [\#524](https://github.com/operator-framework/operator-lifecycle-manager/pull/524) ([ecordell](https://github.com/ecordell))
- fix\(olm\): don't annotate namespaces on operator creation [\#523](https://github.com/operator-framework/operator-lifecycle-manager/pull/523) ([ecordell](https://github.com/ecordell))
- fix\(build\): add cleanup for package release files [\#517](https://github.com/operator-framework/operator-lifecycle-manager/pull/517) ([jpeeler](https://github.com/jpeeler))
- fix\(metrics\): use CRD client instead of k8s [\#515](https://github.com/operator-framework/operator-lifecycle-manager/pull/515) ([jpeeler](https://github.com/jpeeler))
- fix\(requirements\): add support for non resource url rules [\#514](https://github.com/operator-framework/operator-lifecycle-manager/pull/514) ([njhale](https://github.com/njhale))
- Rewrite Namespace when Fetching Global PackageManifests [\#513](https://github.com/operator-framework/operator-lifecycle-manager/pull/513) ([alecmerdler](https://github.com/alecmerdler))
- Small log statement fix with olm requirements [\#508](https://github.com/operator-framework/operator-lifecycle-manager/pull/508) ([eriknelson](https://github.com/eriknelson))
- fix\(requirements\): remove extra empty permission requirement statuses [\#506](https://github.com/operator-framework/operator-lifecycle-manager/pull/506) ([njhale](https://github.com/njhale))
- fix\(ci\): use deployment namespace for catalog\_namespace [\#504](https://github.com/operator-framework/operator-lifecycle-manager/pull/504) ([njhale](https://github.com/njhale))
- fix 30\_14-imagestream.yaml formatting [\#500](https://github.com/operator-framework/operator-lifecycle-manager/pull/500) ([mrogers950](https://github.com/mrogers950))
- Add missing binary to fix run-local-shift [\#497](https://github.com/operator-framework/operator-lifecycle-manager/pull/497) ([font](https://github.com/font))
- feat\(csv\): install owned APIServices [\#492](https://github.com/operator-framework/operator-lifecycle-manager/pull/492) ([njhale](https://github.com/njhale))
- fix\(build\): don't vendor [\#490](https://github.com/operator-framework/operator-lifecycle-manager/pull/490) ([ecordell](https://github.com/ecordell))
- fix\(deploy\): yaml error in imagereferences [\#486](https://github.com/operator-framework/operator-lifecycle-manager/pull/486) ([ecordell](https://github.com/ecordell))
- fix\(deploy\): only use replace on configmaps [\#483](https://github.com/operator-framework/operator-lifecycle-manager/pull/483) ([ecordell](https://github.com/ecordell))
**Closed issues:**
- Continuous Delivery via OLM [\#742](https://github.com/operator-framework/operator-lifecycle-manager/issues/742)
- Operator install plans fail w/repeat count regexp parsing error with OLM 0.8.1+ on OCP 3.11 [\#735](https://github.com/operator-framework/operator-lifecycle-manager/issues/735)
- Generated aggregated cluster roles contains group id in resource name [\#730](https://github.com/operator-framework/operator-lifecycle-manager/issues/730)
- package server panic when deploying operator source from operator-marketplace [\#728](https://github.com/operator-framework/operator-lifecycle-manager/issues/728)
- Help needed with CR display name [\#725](https://github.com/operator-framework/operator-lifecycle-manager/issues/725)
- OLM complains with "Policy rule not satisfied for service account" [\#724](https://github.com/operator-framework/operator-lifecycle-manager/issues/724)
- operator does not upgrade packageserver [\#706](https://github.com/operator-framework/operator-lifecycle-manager/issues/706)
- Unable to deploy OLM on minishift [\#705](https://github.com/operator-framework/operator-lifecycle-manager/issues/705)
- How to enable all workspaces so that OLM watches them? [\#698](https://github.com/operator-framework/operator-lifecycle-manager/issues/698)
- README links to CRDs, Descriptors, and Packages are broken [\#694](https://github.com/operator-framework/operator-lifecycle-manager/issues/694)
- MountVolume.SetUp failed for volume "config-volume" : secrets "alertmanager-alertmanager-main" not found [\#648](https://github.com/operator-framework/operator-lifecycle-manager/issues/648)
- Use CR definitions instead of configmap data [\#644](https://github.com/operator-framework/operator-lifecycle-manager/issues/644)
- `make run-local` for Minikube fails with "packageserver" deployment failure [\#642](https://github.com/operator-framework/operator-lifecycle-manager/issues/642)
- Question: installing the OLM via the CVO [\#628](https://github.com/operator-framework/operator-lifecycle-manager/issues/628)
- change catalog operator default namespace [\#627](https://github.com/operator-framework/operator-lifecycle-manager/issues/627)
- manifests: OLM is creating a namespace without run-level [\#619](https://github.com/operator-framework/operator-lifecycle-manager/issues/619)
- Non operator-sdk operators [\#610](https://github.com/operator-framework/operator-lifecycle-manager/issues/610)
- Unable to retrieve pull secret openshift-operator-lifecycle-manager/coreos-pull-secret for openshift-operator-lifecycle-manager/olm-operator... [\#607](https://github.com/operator-framework/operator-lifecycle-manager/issues/607)
- package-server pod keeps crashing [\#598](https://github.com/operator-framework/operator-lifecycle-manager/issues/598)
- OLM GUI does not have permission to list its OLM CRDs [\#597](https://github.com/operator-framework/operator-lifecycle-manager/issues/597)
- OLM compatibility with cluster monitoring Operator [\#581](https://github.com/operator-framework/operator-lifecycle-manager/issues/581)
- ClusterRoleBinding against aggregated-apiserver-clusterrole without role manifest [\#577](https://github.com/operator-framework/operator-lifecycle-manager/issues/577)
- Failed to update catalog source `rh-operators` status [\#544](https://github.com/operator-framework/operator-lifecycle-manager/issues/544)
- Latest console image is broken [\#540](https://github.com/operator-framework/operator-lifecycle-manager/issues/540)
- Question: How to "enable" the OLM in the console for Minishift ? [\#538](https://github.com/operator-framework/operator-lifecycle-manager/issues/538)
- olm-operator local run expects `master` image tag which is unavailable [\#529](https://github.com/operator-framework/operator-lifecycle-manager/issues/529)
- CSV waits for wrong CRD version to be available [\#507](https://github.com/operator-framework/operator-lifecycle-manager/issues/507)
- Add support for specifying multiple CRDs in one yaml file [\#495](https://github.com/operator-framework/operator-lifecycle-manager/issues/495)
- make run-local-shift fails on minishift due to the lack of helm [\#488](https://github.com/operator-framework/operator-lifecycle-manager/issues/488)
- CSV support for ClusterRoles [\#473](https://github.com/operator-framework/operator-lifecycle-manager/issues/473)
**Other changes:**
- docs\(git\): add changelog [\#765](https://github.com/operator-framework/operator-lifecycle-manager/pull/765) ([njhale](https://github.com/njhale))
- chore\(manifests\): regenerate manifests to include new anyOf validation [\#744](https://github.com/operator-framework/operator-lifecycle-manager/pull/744) ([njhale](https://github.com/njhale))
- docs: update CSV link [\#713](https://github.com/operator-framework/operator-lifecycle-manager/pull/713) ([robszumski](https://github.com/robszumski))
- chore\(deploy\): use downstream image for openshift builds [\#693](https://github.com/operator-framework/operator-lifecycle-manager/pull/693) ([ecordell](https://github.com/ecordell))
- chore\(modules\): update operator-registry module to v1.0.6 [\#691](https://github.com/operator-framework/operator-lifecycle-manager/pull/691) ([njhale](https://github.com/njhale))
- Add godoc for InstallModeTypes [\#683](https://github.com/operator-framework/operator-lifecycle-manager/pull/683) ([pmorie](https://github.com/pmorie))
- chore\(deploy\): change 30 prefix to 50 [\#678](https://github.com/operator-framework/operator-lifecycle-manager/pull/678) ([ecordell](https://github.com/ecordell))
- Cut 0.8.1 [\#662](https://github.com/operator-framework/operator-lifecycle-manager/pull/662) ([ecordell](https://github.com/ecordell))
- Update metering InstallModes to support SingleNamespace [\#658](https://github.com/operator-framework/operator-lifecycle-manager/pull/658) ([chancez](https://github.com/chancez))
- chore\(ci\): remove ci checks for PRs [\#654](https://github.com/operator-framework/operator-lifecycle-manager/pull/654) ([ecordell](https://github.com/ecordell))
- Remove operatorsource installation [\#651](https://github.com/operator-framework/operator-lifecycle-manager/pull/651) ([kevinrizza](https://github.com/kevinrizza))
- update Service Catalog memory & cpu limits [\#649](https://github.com/operator-framework/operator-lifecycle-manager/pull/649) ([jboyd01](https://github.com/jboyd01))
- Add Metering Operator to catalog [\#647](https://github.com/operator-framework/operator-lifecycle-manager/pull/647) ([EmilyM1](https://github.com/EmilyM1))
- fix 1663113. Add component image ENV vars to cluster-logging-operator [\#645](https://github.com/operator-framework/operator-lifecycle-manager/pull/645) ([jcantrill](https://github.com/jcantrill))
- Updated MongoDB Operator to 0.6 [\#641](https://github.com/operator-framework/operator-lifecycle-manager/pull/641) ([rodrigovalin](https://github.com/rodrigovalin))
- chore\(fake\): add fake for registry client [\#630](https://github.com/operator-framework/operator-lifecycle-manager/pull/630) ([ecordell](https://github.com/ecordell))
- increase memory limits on all service catalog pods [\#629](https://github.com/operator-framework/operator-lifecycle-manager/pull/629) ([jboyd01](https://github.com/jboyd01))
- Install operatorsource crd and default cr [\#622](https://github.com/operator-framework/operator-lifecycle-manager/pull/622) ([kevinrizza](https://github.com/kevinrizza))
- Updated CSV marketplace manifests [\#621](https://github.com/operator-framework/operator-lifecycle-manager/pull/621) ([ecordell](https://github.com/ecordell))
- add rbac for servicebindings/finalizers [\#615](https://github.com/operator-framework/operator-lifecycle-manager/pull/615) ([jboyd01](https://github.com/jboyd01))
- Regen manifests [\#611](https://github.com/operator-framework/operator-lifecycle-manager/pull/611) ([ecordell](https://github.com/ecordell))
- Descheduler operator CRD, CSV [\#584](https://github.com/operator-framework/operator-lifecycle-manager/pull/584) ([ravisantoshgudimetla](https://github.com/ravisantoshgudimetla))
- chore\(docs\): fix markdown lint warnings [\#574](https://github.com/operator-framework/operator-lifecycle-manager/pull/574) ([jpeeler](https://github.com/jpeeler))
- docs\(arch\): add operator groups [\#573](https://github.com/operator-framework/operator-lifecycle-manager/pull/573) ([jpeeler](https://github.com/jpeeler))
- Creating csv, crd, and packages for clusterlogging and elasticsearch … [\#570](https://github.com/operator-framework/operator-lifecycle-manager/pull/570) ([jcantrill](https://github.com/jcantrill))
- Cut 0.8.0 [\#567](https://github.com/operator-framework/operator-lifecycle-manager/pull/567) ([ecordell](https://github.com/ecordell))
- chore\(build\): remove vendor commands from base dockerfile [\#566](https://github.com/operator-framework/operator-lifecycle-manager/pull/566) ([ecordell](https://github.com/ecordell))
- chore\(release\): bump version to 0.8.0 [\#565](https://github.com/operator-framework/operator-lifecycle-manager/pull/565) ([ecordell](https://github.com/ecordell))
- Service Catalog CSV: update resource limitations [\#562](https://github.com/operator-framework/operator-lifecycle-manager/pull/562) ([jboyd01](https://github.com/jboyd01))
- Update AMQ logo to new brand standard [\#547](https://github.com/operator-framework/operator-lifecycle-manager/pull/547) ([rhamilto](https://github.com/rhamilto))
- use OpenShift's ServiceCatalog build, update rbac + more [\#545](https://github.com/operator-framework/operator-lifecycle-manager/pull/545) ([jboyd01](https://github.com/jboyd01))
- Add Service to Owned Resources for Prometheus Operator [\#539](https://github.com/operator-framework/operator-lifecycle-manager/pull/539) ([alecmerdler](https://github.com/alecmerdler))
- Add `ConfigMap` as an Owned Resource for Prometheus [\#535](https://github.com/operator-framework/operator-lifecycle-manager/pull/535) ([alecmerdler](https://github.com/alecmerdler))
- chore\(release\): cut release 0.7.4 [\#534](https://github.com/operator-framework/operator-lifecycle-manager/pull/534) ([njhale](https://github.com/njhale))
- chore\(release\): bump version number to 0.7.4 [\#533](https://github.com/operator-framework/operator-lifecycle-manager/pull/533) ([njhale](https://github.com/njhale))
- \[Doc\] Add note about helm binary requirement [\#528](https://github.com/operator-framework/operator-lifecycle-manager/pull/528) ([aditya-konarde](https://github.com/aditya-konarde))
- chore\(package-server\): enable auth by default [\#526](https://github.com/operator-framework/operator-lifecycle-manager/pull/526) ([njhale](https://github.com/njhale))
- More Descriptors for Dynatrace Operator [\#522](https://github.com/operator-framework/operator-lifecycle-manager/pull/522) ([alecmerdler](https://github.com/alecmerdler))
- More Descriptors for Couchbase Operator [\#520](https://github.com/operator-framework/operator-lifecycle-manager/pull/520) ([alecmerdler](https://github.com/alecmerdler))
- Update OKD install instructions [\#519](https://github.com/operator-framework/operator-lifecycle-manager/pull/519) ([font](https://github.com/font))
- bump 0.7.3 [\#512](https://github.com/operator-framework/operator-lifecycle-manager/pull/512) ([ecordell](https://github.com/ecordell))
- chore\(deploy\): update release scripts to not use 3 different images [\#510](https://github.com/operator-framework/operator-lifecycle-manager/pull/510) ([ecordell](https://github.com/ecordell))
- chore\(deploy\): put all binaries in the same image [\#509](https://github.com/operator-framework/operator-lifecycle-manager/pull/509) ([ecordell](https://github.com/ecordell))
- chore\(deploy\): use runlevel label on namespace [\#503](https://github.com/operator-framework/operator-lifecycle-manager/pull/503) ([smarterclayton](https://github.com/smarterclayton))
- add example and additional info about optional CSV metadata \(replace… [\#501](https://github.com/operator-framework/operator-lifecycle-manager/pull/501) ([madorn](https://github.com/madorn))
- Fix for Federation CSV [\#498](https://github.com/operator-framework/operator-lifecycle-manager/pull/498) ([alecmerdler](https://github.com/alecmerdler))
- Dropping label for origin releasepayload [\#496](https://github.com/operator-framework/operator-lifecycle-manager/pull/496) ([abhinavdahiya](https://github.com/abhinavdahiya))
- fix\(deploy\): add base images for ci-operator [\#493](https://github.com/operator-framework/operator-lifecycle-manager/pull/493) ([ecordell](https://github.com/ecordell))
- rh-operators: add FederationV2 v0.0.2 [\#491](https://github.com/operator-framework/operator-lifecycle-manager/pull/491) ([font](https://github.com/font))
- change go build to use 1.10 [\#489](https://github.com/operator-framework/operator-lifecycle-manager/pull/489) ([jpeeler](https://github.com/jpeeler))
- Revert "fix\(deploy\): remove package-server until ci-operator is outpu… [\#487](https://github.com/operator-framework/operator-lifecycle-manager/pull/487) ([ecordell](https://github.com/ecordell))
- re-cut 0.7.1 [\#484](https://github.com/operator-framework/operator-lifecycle-manager/pull/484) ([ecordell](https://github.com/ecordell))
- certified-operators: rectify form of business for Dynatrace [\#478](https://github.com/operator-framework/operator-lifecycle-manager/pull/478) ([baichinger](https://github.com/baichinger))
- Cut 0.7.1 [\#475](https://github.com/operator-framework/operator-lifecycle-manager/pull/475) ([ecordell](https://github.com/ecordell))
## [0.7.1](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.7.1) (2018-09-19)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.7.0...0.7.1)
**Closed issues:**
- Tag "master" of quay.io/coreos/package-server does not exist [\#471](https://github.com/operator-framework/operator-lifecycle-manager/issues/471)
**Other changes:**
- feat\(ci\): push tags to quay [\#474](https://github.com/operator-framework/operator-lifecycle-manager/pull/474) ([ecordell](https://github.com/ecordell))
## [0.7.0](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.7.0) (2018-09-19)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/v3.11.0...0.7.0)
**Closed issues:**
- minishift steps need to be updated for service catalog [\#469](https://github.com/operator-framework/operator-lifecycle-manager/issues/469)
- The `--service-catalog` clusterup option is invalid for OS 3.10+ [\#468](https://github.com/operator-framework/operator-lifecycle-manager/issues/468)
- `The ConfigMap "rh-operators" is invalid` while installing on upstream k8s [\#467](https://github.com/operator-framework/operator-lifecycle-manager/issues/467)
**Other changes:**
- 0.7.0 packages api [\#472](https://github.com/operator-framework/operator-lifecycle-manager/pull/472) ([ecordell](https://github.com/ecordell))
- Issue \#469 update the service catalog instruction for minishift. [\#470](https://github.com/operator-framework/operator-lifecycle-manager/pull/470) ([praveenkumar](https://github.com/praveenkumar))
- Documentation: Change to full path [\#466](https://github.com/operator-framework/operator-lifecycle-manager/pull/466) ([LiliC](https://github.com/LiliC))
- All Namespaces Support for PackageManifest API [\#465](https://github.com/operator-framework/operator-lifecycle-manager/pull/465) ([alecmerdler](https://github.com/alecmerdler))
- Create OWNERS [\#464](https://github.com/operator-framework/operator-lifecycle-manager/pull/464) ([ecordell](https://github.com/ecordell))
- test\(e2e\): add OwnerReference GC behavior test [\#463](https://github.com/operator-framework/operator-lifecycle-manager/pull/463) ([njhale](https://github.com/njhale))
- Cut 0.7.0 [\#462](https://github.com/operator-framework/operator-lifecycle-manager/pull/462) ([ecordell](https://github.com/ecordell))
- List out options for Descriptors [\#461](https://github.com/operator-framework/operator-lifecycle-manager/pull/461) ([madorn](https://github.com/madorn))
- cut 0.7.0 images [\#460](https://github.com/operator-framework/operator-lifecycle-manager/pull/460) ([ecordell](https://github.com/ecordell))
- feat\(olm\): support depending on APIservices [\#459](https://github.com/operator-framework/operator-lifecycle-manager/pull/459) ([ecordell](https://github.com/ecordell))
- Improvements on the MongoDB Enterprise Operator CSV. [\#458](https://github.com/operator-framework/operator-lifecycle-manager/pull/458) ([rodrigovalin](https://github.com/rodrigovalin))
- \[WIP\] feat\(catalog\): add RBAC step resolution [\#457](https://github.com/operator-framework/operator-lifecycle-manager/pull/457) ([njhale](https://github.com/njhale))
- add metrics [\#452](https://github.com/operator-framework/operator-lifecycle-manager/pull/452) ([jpeeler](https://github.com/jpeeler))
## [v3.11.0](https://github.com/operator-framework/operator-lifecycle-manager/tree/v3.11.0) (2018-09-13)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.6.0...v3.11.0)
**Implemented enhancements:**
- Instructions for Running OLM UI [\#431](https://github.com/operator-framework/operator-lifecycle-manager/pull/431) ([alecmerdler](https://github.com/alecmerdler))
- Action Descriptors [\#426](https://github.com/operator-framework/operator-lifecycle-manager/pull/426) ([alecmerdler](https://github.com/alecmerdler))
**Fixed bugs:**
- `./scripts/run\_console\_local.sh` doesn't provide a usable console with `make run-local` or `make run-local-shift` [\#437](https://github.com/operator-framework/operator-lifecycle-manager/issues/437)
**Closed issues:**
- Can't deploy OLM onto OpenShift [\#436](https://github.com/operator-framework/operator-lifecycle-manager/issues/436)
- Creation of CRD defined in 05-catalogsource.crd.yaml fails [\#416](https://github.com/operator-framework/operator-lifecycle-manager/issues/416)
**Other changes:**
- fix\(vendor\): revendor dependencies [\#456](https://github.com/operator-framework/operator-lifecycle-manager/pull/456) ([njhale](https://github.com/njhale))
- fix\(vendor\): revendor dependencies [\#455](https://github.com/operator-framework/operator-lifecycle-manager/pull/455) ([njhale](https://github.com/njhale))
- feat\(olm\): attempt to cleanup namespace annotations on shutdown [\#454](https://github.com/operator-framework/operator-lifecycle-manager/pull/454) ([ecordell](https://github.com/ecordell))
- feat\(olm\): attempt to cleanup namespace annotations on shutdown [\#453](https://github.com/operator-framework/operator-lifecycle-manager/pull/453) ([ecordell](https://github.com/ecordell))
- \[WIP\] fix\(ci\): add pipeline queueing [\#451](https://github.com/operator-framework/operator-lifecycle-manager/pull/451) ([njhale](https://github.com/njhale))
- fix\(prometheus\): add securityContext to alertmanager example [\#450](https://github.com/operator-framework/operator-lifecycle-manager/pull/450) ([ecordell](https://github.com/ecordell))
- docs: install OLM with 'kubectl create -f' instead of 'kubectl apply -f' [\#449](https://github.com/operator-framework/operator-lifecycle-manager/pull/449) ([djwhatle](https://github.com/djwhatle))
- feat\(cmd\): add version flags [\#448](https://github.com/operator-framework/operator-lifecycle-manager/pull/448) ([njhale](https://github.com/njhale))
- feat\(catalog\): add aggregated roles for each resolved CRD [\#447](https://github.com/operator-framework/operator-lifecycle-manager/pull/447) ([ecordell](https://github.com/ecordell))
- docs: add CSV guide [\#446](https://github.com/operator-framework/operator-lifecycle-manager/pull/446) ([robszumski](https://github.com/robszumski))
- feat\(run\_console\_local\): add docker pull for console image [\#445](https://github.com/operator-framework/operator-lifecycle-manager/pull/445) ([njhale](https://github.com/njhale))
- chore\(rbac\): add olm-specific ClusterRole [\#444](https://github.com/operator-framework/operator-lifecycle-manager/pull/444) ([ecordell](https://github.com/ecordell))
- Remove outdated install instructions [\#443](https://github.com/operator-framework/operator-lifecycle-manager/pull/443) ([ecordell](https://github.com/ecordell))
- chore\(deploy\): remove ansible scripts for install [\#442](https://github.com/operator-framework/operator-lifecycle-manager/pull/442) ([ecordell](https://github.com/ecordell))
- Fix Local Console Script with Minikube [\#441](https://github.com/operator-framework/operator-lifecycle-manager/pull/441) ([alecmerdler](https://github.com/alecmerdler))
- Add/update CSVs for RH and partners [\#440](https://github.com/operator-framework/operator-lifecycle-manager/pull/440) ([robszumski](https://github.com/robszumski))
- feat\(osbs\): add non-multistage dockerfile [\#439](https://github.com/operator-framework/operator-lifecycle-manager/pull/439) ([njhale](https://github.com/njhale))
- Remove broker files for repo migration [\#438](https://github.com/operator-framework/operator-lifecycle-manager/pull/438) ([jpeeler](https://github.com/jpeeler))
- Allow use of existing KUBECONFIG env var for e2e [\#435](https://github.com/operator-framework/operator-lifecycle-manager/pull/435) ([jpeeler](https://github.com/jpeeler))
- \[WIP\] Package Extension API Server [\#433](https://github.com/operator-framework/operator-lifecycle-manager/pull/433) ([njhale](https://github.com/njhale))
- Separate internal API client wrappers from generated external clients [\#432](https://github.com/operator-framework/operator-lifecycle-manager/pull/432) ([ecordell](https://github.com/ecordell))
- Catalog renaming OCS -\> rh-operators [\#429](https://github.com/operator-framework/operator-lifecycle-manager/pull/429) ([ecordell](https://github.com/ecordell))
- Red Hat CD Readiness [\#428](https://github.com/operator-framework/operator-lifecycle-manager/pull/428) ([njhale](https://github.com/njhale))
- test\(e2e\): add catalog loading between operator restart test [\#427](https://github.com/operator-framework/operator-lifecycle-manager/pull/427) ([njhale](https://github.com/njhale))
- updated prometheus csv according to the integration into openshift [\#425](https://github.com/operator-framework/operator-lifecycle-manager/pull/425) ([sichvoge](https://github.com/sichvoge))
- fix\(catalog\): add check for loaded catalogs [\#424](https://github.com/operator-framework/operator-lifecycle-manager/pull/424) ([njhale](https://github.com/njhale))
- feat\(subscription\): add dedicated currentCSV field to subscription [\#423](https://github.com/operator-framework/operator-lifecycle-manager/pull/423) ([njhale](https://github.com/njhale))
- fix\(operatorclient\): remove wait for deployment rollout [\#422](https://github.com/operator-framework/operator-lifecycle-manager/pull/422) ([njhale](https://github.com/njhale))
- fix\(e2e\): remove deployment cleanup [\#421](https://github.com/operator-framework/operator-lifecycle-manager/pull/421) ([njhale](https://github.com/njhale))
- Set Status on Subscription with Invalid Catalog Source [\#420](https://github.com/operator-framework/operator-lifecycle-manager/pull/420) ([alecmerdler](https://github.com/alecmerdler))
- feat\(catalog\): add catalog status block updates [\#419](https://github.com/operator-framework/operator-lifecycle-manager/pull/419) ([njhale](https://github.com/njhale))
- Deployment fixes [\#418](https://github.com/operator-framework/operator-lifecycle-manager/pull/418) ([ecordell](https://github.com/ecordell))
- Fixes \#416 [\#417](https://github.com/operator-framework/operator-lifecycle-manager/pull/417) ([mvazquezc](https://github.com/mvazquezc))
- \[WIP\] feat\(catalog\): add namespace awareness to plan execution [\#415](https://github.com/operator-framework/operator-lifecycle-manager/pull/415) ([njhale](https://github.com/njhale))
- Make key resources linkable in philosophy doc [\#414](https://github.com/operator-framework/operator-lifecycle-manager/pull/414) ([pmorie](https://github.com/pmorie))
- Cut 0.6.0 [\#413](https://github.com/operator-framework/operator-lifecycle-manager/pull/413) ([ecordell](https://github.com/ecordell))
- \[WIP\] feat\(resolver\): add namespace and channel awareness [\#402](https://github.com/operator-framework/operator-lifecycle-manager/pull/402) ([njhale](https://github.com/njhale))
## [0.6.0](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.6.0) (2018-08-07)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.5.0...0.6.0)
**Closed issues:**
- Attempting upstream installation against kube \>= 1.11 fails with validation errors [\#400](https://github.com/operator-framework/operator-lifecycle-manager/issues/400)
- make schema-check fails with missing vendored dependency [\#389](https://github.com/operator-framework/operator-lifecycle-manager/issues/389)
**Other changes:**
- chore\(vendor\): remove vendor directory [\#412](https://github.com/operator-framework/operator-lifecycle-manager/pull/412) ([ecordell](https://github.com/ecordell))
- fix\(e2e\) + fix\(csv upgrade\) + fix\(leak\) [\#411](https://github.com/operator-framework/operator-lifecycle-manager/pull/411) ([ecordell](https://github.com/ecordell))
- minor: broken link [\#408](https://github.com/operator-framework/operator-lifecycle-manager/pull/408) ([Jiri-Kremser](https://github.com/Jiri-Kremser))
- fix\(csv\): CSV update process optimization regression [\#407](https://github.com/operator-framework/operator-lifecycle-manager/pull/407) ([ecordell](https://github.com/ecordell))
- Use GroupVersionKind in Spec Descriptor for Selector [\#405](https://github.com/operator-framework/operator-lifecycle-manager/pull/405) ([alecmerdler](https://github.com/alecmerdler))
- Catalog Cleanup [\#404](https://github.com/operator-framework/operator-lifecycle-manager/pull/404) ([ecordell](https://github.com/ecordell))
- 1.11 updates [\#403](https://github.com/operator-framework/operator-lifecycle-manager/pull/403) ([ecordell](https://github.com/ecordell))
- test\(catalog\_versions\): add multi-catalogsource aware resolution tests [\#401](https://github.com/operator-framework/operator-lifecycle-manager/pull/401) ([njhale](https://github.com/njhale))
- Add CatalogSource Namespace To CRD Validation [\#399](https://github.com/operator-framework/operator-lifecycle-manager/pull/399) ([alecmerdler](https://github.com/alecmerdler))
- chore\(deps\): update k8s libs to 1.11 versions [\#398](https://github.com/operator-framework/operator-lifecycle-manager/pull/398) ([ecordell](https://github.com/ecordell))
- docs: add access control workflow [\#397](https://github.com/operator-framework/operator-lifecycle-manager/pull/397) ([robszumski](https://github.com/robszumski))
- test\(installplan\): multi source e2e [\#396](https://github.com/operator-framework/operator-lifecycle-manager/pull/396) ([njhale](https://github.com/njhale))
- chore\(ci\): update gitlab-ci.yml from jsonnet [\#395](https://github.com/operator-framework/operator-lifecycle-manager/pull/395) ([ecordell](https://github.com/ecordell))
- fix\(installplan\): type InstallPlan.Status.CatalogSources to \[\]string … [\#394](https://github.com/operator-framework/operator-lifecycle-manager/pull/394) ([njhale](https://github.com/njhale))
- Deploy CI to `operator-lifecycle-manager` Namespace [\#393](https://github.com/operator-framework/operator-lifecycle-manager/pull/393) ([alecmerdler](https://github.com/alecmerdler))
- Fix invalid `minishift start` option in build\_local\_shift.sh [\#392](https://github.com/operator-framework/operator-lifecycle-manager/pull/392) ([jsm84](https://github.com/jsm84))
- vendor: re-run `dep ensure` [\#390](https://github.com/operator-framework/operator-lifecycle-manager/pull/390) ([jzelinskie](https://github.com/jzelinskie))
- feat\(catalog\_resources\): add prometheus 0.22.1 [\#388](https://github.com/operator-framework/operator-lifecycle-manager/pull/388) ([ecordell](https://github.com/ecordell))
- feat\(catalog\): multiple CatalogSource resolution [\#386](https://github.com/operator-framework/operator-lifecycle-manager/pull/386) ([njhale](https://github.com/njhale))
## [0.5.0](https://github.com/operator-framework/operator-lifecycle-manager/tree/0.5.0) (2018-07-23)
[Full Changelog](https://github.com/operator-framework/operator-lifecycle-manager/compare/0.4.0...0.5.0)
**Implemented enhancements:**
- Separate Manifest Validator [\#365](https://github.com/operator-framework/operator-lifecycle-manager/pull/365) ([alecmerdler](https://github.com/alecmerdler))
- Manual Approval of Install Plans [\#347](https://github.com/operator-framework/operator-lifecycle-manager/pull/347) ([alecmerdler](https://github.com/alecmerdler))
- GitLab CI Fix [\#341](https://github.com/operator-framework/operator-lifecycle-manager/pull/341) ([alecmerdler](https://github.com/alecmerdler))
**Fixed bugs:**
- Prevent ownership conflicts for CRDs [\#375](https://github.com/operator-framework/operator-lifecycle-manager/pull/375) ([ecordell](https://github.com/ecordell))
- Fix InstallPlanReference in Subscription Status [\#359](https://github.com/operator-framework/operator-lifecycle-manager/pull/359) ([ecordell](https://github.com/ecordell))
- Fix Subscriptions being Updated Without Changes [\#357](https://github.com/operator-framework/operator-lifecycle-manager/pull/357) ([alecmerdler](https://github.com/alecmerdler))
- Slack Webhook Rename [\#337](https://github.com/operator-framework/operator-lifecycle-manager/pull/337) ([alecmerdler](https://github.com/alecmerdler))
- Clarify Status Condition when Catalog Source Not Found [\#335](https://github.com/operator-framework/operator-lifecycle-manager/pull/335) ([alecmerdler](https://github.com/alecmerdler))
**Closed issues:**
- Typos in architecture doc [\#376](https://github.com/operator-framework/operator-lifecycle-manager/issues/376)
- make manifests error when rendering templates with helm [\#369](https://github.com/operator-framework/operator-lifecycle-manager/issues/369)
- make run-local missing charts [\#368](https://github.com/operator-framework/operator-lifecycle-manager/issues/368)
- make run-local is broken [\#366](https://github.com/operator-framework/operator-lifecycle-manager/issues/366)
- make build fails [\#360](https://github.com/operator-framework/operator-lifecycle-manager/issues/360)
- vendoring fails when run `make vendor` [\#350](https://github.com/operator-framework/operator-lifecycle-manager/issues/350)
**Other changes:**
- fix SingleSourceResolver to use plan namespace for resolved CSVs [\#387](https://github.com/operator-framework/operator-lifecycle-manager/pull/387) ([njhale](https://github.com/njhale))
- Add CatalogSource Name to InstallPlan Steps [\#385](https://github.com/operator-framework/operator-lifecycle-manager/pull/385) ([njhale](https://github.com/njhale))
- Fix OpenShift Deploy CI [\#384](https://github.com/operator-framework/operator-lifecycle-manager/pull/384) ([alecmerdler](https://github.com/alecmerdler))
- Deploy to OpenShift Cluster [\#383](https://github.com/operator-framework/operator-lifecycle-manager/pull/383) ([alecmerdler](https://github.com/alecmerdler))
- fix\(docs\): correct spelling of philosopy.md [\#382](https://github.com/operator-framework/operator-lifecycle-manager/pull/382) ([aravindhp](https://github.com/aravindhp))
- Cut 0.5.0 [\#381](https://github.com/operator-framework/operator-lifecycle-manager/pull/381) ([ecordell](https://github.com/ecordell))
- Add CatalogSource Namespace to Subscription Objects [\#380](https://github.com/operator-framework/operator-lifecycle-manager/pull/380) ([alecmerdler](https://github.com/alecmerdler))
- fix\(docs\): typos in architecture.md [\#377](https://github.com/operator-framework/operator-lifecycle-manager/pull/377) ([ecordell](https://github.com/ecordell))
- Fix a small typo [\#374](https://github.com/operator-framework/operator-lifecycle-manager/pull/374) ([fabiand](https://github.com/fabiand))
- fix\(servicebroker\): add main\_test.go to servicebroker cmd [\#372](https://github.com/operator-framework/operator-lifecycle-manager/pull/372) ([ecordell](https://github.com/ecordell))
- fix\(make\): fixes run-local and run-local-shift [\#371](https://github.com/operator-framework/operator-lifecycle-manager/pull/371) ([ecordell](https://github.com/ecordell))
- Remove tectonic-operators [\#370](https://github.com/operator-framework/operator-lifecycle-manager/pull/370) ([ecordell](https://github.com/ecordell))
- fix\(make\): set version var for run-local [\#367](https://github.com/operator-framework/operator-lifecycle-manager/pull/367) ([ecordell](https://github.com/ecordell))
- Resolve Install Plan before Requiring Approval [\#364](https://github.com/operator-framework/operator-lifecycle-manager/pull/364) ([alecmerdler](https://github.com/alecmerdler))
- readme: fix broken link to CSV example [\#363](https://github.com/operator-framework/operator-lifecycle-manager/pull/363) ([robszumski](https://github.com/robszumski))
- Fix memory usage in catalog operator [\#362](https://github.com/operator-framework/operator-lifecycle-manager/pull/362) ([ecordell](https://github.com/ecordell))
- Tests for Runaway Control Loops [\#361](https://github.com/operator-framework/operator-lifecycle-manager/pull/361) ([alecmerdler](https://github.com/alecmerdler))
- Fix the deploy to kubernetes command in install.md [\#358](https://github.com/operator-framework/operator-lifecycle-manager/pull/358) ([aravindhp](https://github.com/aravindhp))
- Ansible playbook [\#356](https://github.com/operator-framework/operator-lifecycle-manager/pull/356) ([ecordell](https://github.com/ecordell))
- Add metering to a separate, upstream-only catalog [\#354](https://github.com/operator-framework/operator-lifecycle-manager/pull/354) ([ecordell](https://github.com/ecordell))
- chore\(deps\): commit vendored dependencies [\#352](https://github.com/operator-framework/operator-lifecycle-manager/pull/352) ([ecordell](https://github.com/ecordell))
- feat\(servicebroker\): list one serviceclass per package [\#349](https://github.com/operator-framework/operator-lifecycle-manager/pull/349) ([ericavonb](https://github.com/ericavonb))
- Add installPlanApproval to Subscription-v1 [\#348](https://github.com/operator-framework/operator-lifecycle-manager/pull/348) ([ecordell](https://github.com/ecordell))
- Strip Markdown Descriptions from OSB [\#346](https://github.com/operator-framework/operator-lifecycle-manager/pull/346) ([alecmerdler](https://github.com/alecmerdler))
- test validate service broker api version function [\#345](https://github.com/operator-framework/operator-lifecycle-manager/pull/345) ([ericavonb](https://github.com/ericavonb))
- OSB Rebase \(part 2\) [\#344](https://github.com/operator-framework/operator-lifecycle-manager/pull/344) ([ecordell](https://github.com/ecordell))
- Rebased OSB backbone [\#342](https://github.com/operator-framework/operator-lifecycle-manager/pull/342) ([ecordell](https://github.com/ecordell))
- Update manifests for 0.4.0 release [\#340](https://github.com/operator-framework/operator-lifecycle-manager/pull/340) ([ecordell](https://github.com/ecordell))
\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*

View File

@ -0,0 +1,90 @@
## Tooling
### Requirements
| Requirement | Purpose | macOS |
|-------------|-----------------------|----------------------|
| Go | Compiler | brew install go |
| Dep | Dependency Management | brew install dep |
| Docker | Packaging | [Docker for Mac] |
| jsonnet | JSON templating tool | brew install jsonnet |
| ffctl | Gitlab CI format | pip install ffctl |
[Docker for Mac]: https://store.docker.com/editions/community/docker-ce-desktop-mac
### Usage
#### Testing
This project uses the built-in testing support for golang.
To run the tests for all go packages outside of the vendor directory, run:
```sh
$ make test
```
To run the e2e tests locally:
```sh
$ make e2e-local
```
To run a specific e2e test locally:
```sh
$ make e2e-local TEST=TestCreateInstallPlanManualApproval
```
#### Building
Ensure your version of go is up to date; check that you're running v1.9 with the
command:
```sh
$ go version
```
To build the go binary, run:
```sh
$ make build
```
#### Packaging
ALM is packaged as a set of manifests for a tectonic-x-operator specialization (tectonic-alm-operator).
A new version can be generated from the helm chart by:
1. Modifying the `deploy/tectonic-alm-operator/values.yaml` file for the release to include new SHAs of the container images.
1. Running the `package` make command, which takes a single variable (`ver`)
For example:
```
make ver=0.3.0 package
```
Will generate a new set of manifests from the helm chart in `deploy/chart` combined with the `values.yaml` file in `deploy/tectonic-alm-operator`, and output the rendered templates to `deploy/tectonic-alm-operator/manifests/0.3.0`.
See the documentation in `deploy/tectonic-alm-operator` for how to take the new manifests and package them as a new version of `tectonic-alm-operator`.
### Dependency Management
#### Using make
These commands are handled for you via the Makefile. To install the project
dependencies, run:
```sh
$ make vendor
```
To update dependencies, run:
```sh
$ make vendor-update
# verify changes
$ make test
$ make e2e-local-docker
```
The Makefile recipes for testing and builds ensure the project's dependencies
are properly installed and vendored before running.

View File

@ -0,0 +1,55 @@
# Builder stage: compile the OLM binaries from source.
FROM openshift/origin-release:golang-1.12 as builder

# Update and install build tooling in a single RUN so the package metadata is
# never stale relative to the install step, and to avoid an extra image layer.
RUN yum update -y && yum install -y make git

ENV GO111MODULE auto
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH

WORKDIR /build

# copy just enough of the git repo to parse HEAD, used to record version in OLM binaries
COPY .git/HEAD .git/HEAD
COPY .git/refs/heads/. .git/refs/heads
RUN mkdir -p .git/objects

COPY Makefile Makefile
COPY OLM_VERSION OLM_VERSION
COPY pkg pkg
COPY vendor vendor
COPY cmd cmd
COPY test test
COPY go.mod go.mod
COPY go.sum go.sum
RUN make build

# Runtime stage: copy only the built binaries and manifests onto a slim base.
FROM openshift/origin-base

ADD manifests/ /manifests
LABEL io.openshift.release.operator=true

# Copy the binary to a standard location where it will run.
COPY --from=builder /build/bin/olm /bin/olm
COPY --from=builder /build/bin/catalog /bin/catalog
COPY --from=builder /build/bin/package-server /bin/package-server

# This image doesn't need to run as root user.
USER 1001
EXPOSE 8080
EXPOSE 5443

# Apply labels as needed. ART build automation fills in others required for
# shipping, including component NVR (name-version-release) and image name. OSBS
# applies others at build time. So most required labels need not be in the source.
#
# io.k8s.display-name is required and is displayed in certain places in the
# console (someone correct this if that's no longer the case)
#
# io.k8s.description is equivalent to "description" and should be defined per
# image; otherwise the parent image's description is inherited which is
# confusing at best when examining images.
#
LABEL io.k8s.display-name="OpenShift Operator Lifecycle Manager" \
      io.k8s.description="This is a component of OpenShift Container Platform and manages the lifecycle of operators." \
      maintainer="Odin Team <aos-odin@redhat.com>"

View File

@ -0,0 +1,775 @@
# Improved Subscription Status
Status: Pending
Version: Alpha
Implementation Owner: TBD
## Motivation
The `Subscription` `CustomResource` needs to expose useful information when a failure scenario is encountered. Failures can be encountered throughout a `Subscription`'s existence and can include issues with `InstallPlan` resolution, `CatalogSource` connectivity, `ClusterServiceVersion` (CSV) status, and more. To surface this information, explicit status for `Subscriptions` will be introduced via [status conditions](#status-conditions) which will be set by new, specialized status sync handlers for resources of interest (`Subscriptions`, `InstallPlan`s, `CatalogSource`s and CSVs).
### Following Conventions
In order to design a status that makes sense in the context of kubernetes resources, it's important to conform to current conventions. This will also help us avoid pitfalls that may have already been solved.
#### Status Conditions
The [kube api-conventions docs](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) state that:
> Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations.
A few internal Kubernetes resources that implement status conditions:
- [NodeStatus](https://github.com/kubernetes/kubernetes/blob/6c31101257bfcd47fa53702cea07fe2eedf2ad92/pkg/apis/core/types.go#L3556)
- [DeploymentStatus](https://github.com/kubernetes/kubernetes/blob/f5574bf62a051c4a41a3fff717cc0bad735827eb/pkg/apis/apps/types.go#L415)
- [DaemonSetStatus](https://github.com/kubernetes/kubernetes/blob/f5574bf62a051c4a41a3fff717cc0bad735827eb/pkg/apis/apps/types.go#L582)
- [ReplicaSetStatus](https://github.com/kubernetes/kubernetes/blob/f5574bf62a051c4a41a3fff717cc0bad735827eb/pkg/apis/apps/types.go#L751)
Introducing status conditions will let us have an explicit, level-based view of the current abnormal state of a `Subscription`. They are essentially orthogonal states (regions) of the compound state (`SubscriptionStatus`)¹. A conditionᵢ has a set of sub states [Unknown, True, False] each with sub states of their own [Reasonsᵢ],where Reasonsᵢ contains the set of transition reasons for conditionᵢ. This compound state can be used to inform a decision about performing an operation on the cluster.
> 1. [What is a statechart?](https://statecharts.github.io/what-is-a-statechart.html); see 'A state can have many "regions"'
#### References to Related Objects
The [kube api-convention docs](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#references-to-related-objects) state that:
> References to specific objects, especially specific resource versions and/or specific fields of those objects, are specified using the ObjectReference type (or other types representing strict subsets of it).
Rather than building our own abstractions to reference managed resources (like `InstallPlan`s), we can take advantage of the pre-existing `ObjectReference` type.
## Proposal
### Changes to SubscriptionStatus
- Introduce a `SubscriptionCondition` type
- Describes a single state of a `Subscription` explicitly
- Introduce a `SubscriptionConditionType` field
- Describes the type of a condition
- Introduce a `Conditions` field of type `[]SubscriptionCondition` to `SubscriptionStatus`
- Describes multiple potentially orthogonal states of a `Subscription` explicitly
- Introduce an `InstallPlanRef` field of type [*corev1.ObjectReference](https://github.com/kubernetes/kubernetes/blob/f5574bf62a051c4a41a3fff717cc0bad735827eb/pkg/apis/core/types.go#L3993)
- To replace custom type with existing apimachinery type
- Deprecate the `Install` field
- Value will be kept up to date to support older clients until a major version change
- Introduce a `SubscriptionCatalogStatus` type
- Describes a Subscription's view of a CatalogSource's status
- Introduce a `CatalogStatus` field of type `[]SubscriptionCatalogStatus`
- CatalogStatus contains the Subscription's view of its relevant CatalogSources' status
### Changes to Subscription Reconciliation
Changes to `Subscription` reconciliation can be broken into three parts:
1. Phase in use of `SubscriptionStatus.Install` with `SubscriptionStatus.InstallPlanRef`:
- Write to `Install` and `InstallPlanRef` but still read from `Install`
- Read from `InstallPlanRef`
- Stop writing to `Install`
2. Create independent sync handlers and workqueues for resources of interest (status-handler) that only update specific `SubscriptionStatus` fields and `StatusConditions`:
- Build actionable state reactively through objects of interest
- Treat omitted `SubscriptionConditionTypes` in `SubscriptionStatus.Conditions` as having `ConditionStatus` "Unknown"
- Add new status-handlers with new workqueues for:
- `Subscription`s
- `CatalogSource`s
- `InstallPlan`s
- CSVs
- These sync handlers can be phased-in incrementally:
- Add a conditions block and the `UpToDate` field, and ensure the `UpToDate` field is set properly when updating status
- Pick one condition to start detecting, and write its status
- Repeat with other conditions. This is a good opportunity to parallelize work and deliver immediate value to end-users (they start seeing the new conditions ASAP)
- Once all conditions are being synchronized, start using them to set the state of other fields (e.g. `UpToDate`)
3. Add status-handler logic to toggle the `SubscriptionStatus.UpToDate` field:
- Whenever `SubscriptionStatus.InstalledCSV == SubscriptionStatus.CurrentCSV` and `SubscriptionStatus.Conditions` has a `SubscriptionConditionType` of type `SubscriptionInstalledCSVReplacementAvailable` with `Status == "True"`, set `SubscriptionStatus.UpToDate = true`
- Whenever `SubscriptionStatus.InstalledCSV != SubscriptionStatus.CurrentCSV`, set `SubscriptionStatus.UpToDate = false`
## Implementation
### SubscriptionStatus
Updated SubscriptionStatus resource:
```go
import (
// ...
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// ...
)
// SubscriptionStatus reflects the observed state of a Subscription: the CSV it
// is progressing toward, the CSV currently installed, a reference to the latest
// InstallPlan, catalog health, and a list of explicit status conditions.
type SubscriptionStatus struct {
	// ObservedGeneration is the generation observed by the Subscription controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// CurrentCSV is the CSV the Subscription is progressing to.
	// +optional
	CurrentCSV string `json:"currentCSV,omitempty"`
	// InstalledCSV is the CSV currently installed by the Subscription.
	// +optional
	InstalledCSV string `json:"installedCSV,omitempty"`
	// Install is a reference to the latest InstallPlan generated for the Subscription.
	// DEPRECATED: InstallPlanRef
	// +optional
	Install *InstallPlanReference `json:"installplan,omitempty"`
	// State represents the current state of the Subscription
	// +optional
	State SubscriptionState `json:"state,omitempty"`
	// Reason is the reason the Subscription was transitioned to its current state.
	// +optional
	Reason ConditionReason `json:"reason,omitempty"`
	// InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV.
	// +optional
	InstallPlanRef *corev1.ObjectReference `json:"installPlanRef,omitempty"`
	// CatalogStatus contains the Subscription's view of its relevant CatalogSources' status.
	// It is used to determine SubscriptionStatusConditions related to CatalogSources.
	// +optional
	CatalogStatus []SubscriptionCatalogStatus `json:"catalogStatus,omitempty"`
	// UpToDate is true when the latest CSV for the Subscription's package and channel is installed and running; false otherwise.
	//
	// This field is not a status SubscriptionCondition because it "represents a well-known state that applies to all instances of a kind"
	// (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties).
	// In this case, all Subscriptions are either up to date or not up to date.
	//
	// The JSON tag is lowerCamelCase ("upToDate") to match the serialization
	// convention used by every other field in this status block.
	UpToDate bool `json:"upToDate"`
	// LastUpdated represents the last time that the Subscription status was updated.
	LastUpdated metav1.Time `json:"lastUpdated"`
	// Conditions is a list of the latest available observations about a Subscription's current state.
	// +optional
	Conditions []SubscriptionCondition `json:"conditions,omitempty"`
}
// SubscriptionCatalogStatus describes a Subscription's view of a CatalogSource's status.
type SubscriptionCatalogStatus struct {
	// CatalogSourceRef is a reference to a CatalogSource.
	CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
	// LastUpdated represents the last time that the SubscriptionCatalogStatus changed.
	// The field type was missing in the original draft; metav1.Time matches the
	// LastUpdated field on SubscriptionStatus above.
	LastUpdated metav1.Time `json:"lastUpdated"`
	// Healthy is true if the CatalogSource is healthy; false otherwise.
	Healthy bool `json:"healthy"`
}
// SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true"
// polarity form (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties).
// Note: the type name is spelled SubscriptionConditionType so that the const
// declarations below compile (the original draft misspelled it "Susbcription").
type SubscriptionConditionType string

const (
	// SubscriptionResolutionFailed indicates the Subscription has failed to resolve a set of resources.
	SubscriptionResolutionFailed SubscriptionConditionType = "ResolutionFailed"
	// SubscriptionCatalogSourcesUnhealthy indicates that some or all of the CatalogSources to be used in resolution are unhealthy.
	SubscriptionCatalogSourcesUnhealthy SubscriptionConditionType = "CatalogSourcesUnhealthy"
	// SubscriptionCatalogSourceInvalid indicates the CatalogSource specified in the SubscriptionSpec is not valid.
	SubscriptionCatalogSourceInvalid SubscriptionConditionType = "CatalogSourceInvalid"
	// SubscriptionPackageChannelInvalid indicates the package and channel specified in the SubscriptionSpec is not valid.
	SubscriptionPackageChannelInvalid SubscriptionConditionType = "PackageChannelInvalid"
	// SubscriptionInstallPlanFailed indicates the InstallPlan responsible for installing the current CSV has failed.
	SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed"
	// SubscriptionInstallPlanMissing indicates the InstallPlan responsible for installing the current CSV is missing.
	SubscriptionInstallPlanMissing SubscriptionConditionType = "InstallPlanMissing"
	// SubscriptionInstallPlanAwaitingManualApproval indicates the InstallPlan responsible for installing the current CSV is waiting
	// for manual approval.
	SubscriptionInstallPlanAwaitingManualApproval SubscriptionConditionType = "InstallPlanAwaitingManualApproval"
	// SubscriptionInstalledCSVReplacementAvailable indicates there exists a replacement for the installed CSV.
	SubscriptionInstalledCSVReplacementAvailable SubscriptionConditionType = "InstalledCSVReplacementAvailable"
	// SubscriptionInstalledCSVMissing indicates the installed CSV is missing.
	SubscriptionInstalledCSVMissing SubscriptionConditionType = "InstalledCSVMissing"
	// SubscriptionInstalledCSVFailed indicates the installed CSV has failed.
	SubscriptionInstalledCSVFailed SubscriptionConditionType = "InstalledCSVFailed"
)
// SubscriptionCondition describes a single observed condition of a Subscription,
// following the Kubernetes status-condition convention (Type/Status plus
// optional Reason, Message, and timestamps).
type SubscriptionCondition struct {
// Type is the type of Subscription condition.
Type SubscriptionConditionType `json:"type" description:"type of Subscription condition"`
// Status is the status of the condition, one of True, False, Unknown.
Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
// Reason is a one-word CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
// LastHeartbeatTime is the last time we got an update on a given condition.
// +optional
LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"`
}
```
### Subscription Reconciliation
Phasing in `SubscriptionStatus.InstallPlanRef`:
- Create a helper function to convert `ObjectReference`s into `InstallPlanReference`s in _pkg/api/apis/operators/v1alpha1/subscription_types.go_
```go
package v1alpha1
import (
// ...
corev1 "k8s.io/api/core/v1"
// ...
)
// ...
// NewInstallPlanReference converts a generic corev1.ObjectReference into the
// Subscription API's InstallPlanReference, copying only the APIVersion, Kind,
// Name, and UID fields (the namespace is implied by the Subscription).
func NewInstallPlanReference(ref *corev1.ObjectReference) *InstallPlanReference {
return &InstallPlanReference{
APIVersion: ref.APIVersion,
Kind: ref.Kind,
Name: ref.Name,
UID: ref.UID,
}
}
```
- Define an interface and method for generating `ObjectReferences` for `InstallPlan`s in _pkg/api/apis/operators/referencer.go_
```go
package operators
import (
"fmt"
// ...
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
// ...
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha2"
)
// CannotReferenceError indicates that an ObjectReference could not be generated for a resource.
type CannotReferenceError struct {
	// obj is the resource a reference could not be generated for.
	obj interface{}
	// msg describes why the reference could not be generated.
	msg string
}

// Error returns the error's error string.
func (err *CannotReferenceError) Error() string {
	// Fields must be accessed through the receiver; the original draft referenced
	// bare obj/msg, which do not exist in this scope and would not compile.
	return fmt.Sprintf("cannot reference %v: %s", err.obj, err.msg)
}

// NewCannotReferenceError returns a pointer to a CannotReferenceError instantiated
// with the given object and message.
func NewCannotReferenceError(obj interface{}, msg string) *CannotReferenceError {
	return &CannotReferenceError{obj: obj, msg: msg}
}
// ObjectReferencer knows how to return an ObjectReference for a resource.
type ObjectReferencer interface {
// ObjectReferenceFor returns an ObjectReference for the given resource.
ObjectReferenceFor(obj interface{}) (*corev1.ObjectReference, error)
}
// ObjectReferencerFunc is a function type that implements ObjectReferencer.
// It lets a plain function satisfy the interface without a dedicated type.
type ObjectReferencerFunc func(obj interface{}) (*corev1.ObjectReference, error)
// ObjectReferenceFor returns an ObjectReference for the current resource by invoking itself.
func (f ObjectReferencerFunc) ObjectReferenceFor(obj interface{}) (*corev1.ObjectReference, error) {
return f(obj)
}
// OperatorsObjectReferenceFor generates an ObjectReference for the given resource if it's provided by the operators.coreos.com API group.
func OperatorsObjectReferenceFor(obj interface{}) (*corev1.ObjectReference, error) {
	// Attempt to access ObjectMeta
	objMeta, err := meta.Accessor(obj)
	if err != nil {
		return nil, NewCannotReferenceError(obj, err.Error())
	}
	ref := &corev1.ObjectReference{
		Namespace: objMeta.GetNamespace(),
		Name:      objMeta.GetName(),
		// Fixed: metav1.Object exposes GetUID(); GetUI() does not exist.
		UID: objMeta.GetUID(),
	}

	// Determine Kind and APIVersion from the concrete OLM type.
	// Fixed: switch on the original argument rather than on objMeta — value
	// (non-pointer) kinds do not implement metav1.Object, so those cases could
	// never match when switching on the accessor result.
	// NOTE(review): meta.Accessor typically fails for value kinds anyway, so the
	// value cases may still be unreachable — confirm whether value inputs are intended.
	switch obj.(type) {
	case *v1alpha1.ClusterServiceVersion:
		ref.Kind = v1alpha1.ClusterServiceVersionKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case *v1alpha1.InstallPlan:
		ref.Kind = v1alpha1.InstallPlanKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case *v1alpha1.Subscription:
		ref.Kind = v1alpha1.SubscriptionKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case *v1alpha1.CatalogSource:
		ref.Kind = v1alpha1.CatalogSourceKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case *v1alpha2.OperatorGroup:
		// Fixed: OperatorGroup comes from the imported v1alpha2 package; the
		// original matched *v1.OperatorGroup, but no v1 package is imported here.
		ref.Kind = v1alpha2.OperatorGroupKind
		ref.APIVersion = v1alpha2.SchemeGroupVersion.String()
	case v1alpha1.ClusterServiceVersion:
		ref.Kind = v1alpha1.ClusterServiceVersionKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case v1alpha1.InstallPlan:
		ref.Kind = v1alpha1.InstallPlanKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case v1alpha1.Subscription:
		ref.Kind = v1alpha1.SubscriptionKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case v1alpha1.CatalogSource:
		ref.Kind = v1alpha1.CatalogSourceKind
		ref.APIVersion = v1alpha1.SchemeGroupVersion.String()
	case v1alpha2.OperatorGroup:
		ref.Kind = v1alpha2.OperatorGroupKind
		ref.APIVersion = v1alpha2.SchemeGroupVersion.String()
	default:
		return nil, NewCannotReferenceError(objMeta, "resource not a valid olm kind")
	}
	return ref, nil
}
// ReferenceSet is a set of ObjectReference pointers.
// NOTE(review): keys are pointers, so membership is by pointer identity rather
// than by referenced value — confirm this is the intended semantics for lookups.
type ReferenceSet map[*corev1.ObjectReference]struct{}
// ReferenceSetBuilder knows how to build a ReferenceSet from a resource or collection of resources.
type ReferenceSetBuilder interface {
// Build returns a ReferenceSet containing a reference for each resource in obj.
Build(obj interface{}) (ReferenceSet, error)
}
// ReferenceSetBuilderFunc is a function type that implements ReferenceSetBuilder.
type ReferenceSetBuilderFunc func(obj interface{}) (ReferenceSet, error)
// Build builds a ReferenceSet for obj by invoking the function itself.
func (f ReferenceSetBuilderFunc) Build(obj interface{}) (ReferenceSet, error) {
return f(obj)
}
// BuildOperatorsReferenceSet generates a ReferenceSet from the given resource,
// which may be a single OLM resource or a slice of OLM resources in pointer or
// value form. An error is returned if any element cannot be referenced.
func BuildOperatorsReferenceSet(obj interface{}) (ReferenceSet, error) {
	referencer := ObjectReferencer(OperatorsObjectReferenceFor)

	// Normalize the input into a flat list of resources to reference.
	// Fixed: the original declared `obj := []interface{}` (invalid Go that also
	// shadowed the parameter) and the default case used an undeclared `o`.
	var objs []interface{}
	switch v := obj.(type) {
	case []*v1alpha1.ClusterServiceVersion:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []*v1alpha1.InstallPlan:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []*v1alpha1.Subscription:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []*v1alpha1.CatalogSource:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []*v1alpha2.OperatorGroup:
		// Fixed: OperatorGroup comes from the imported v1alpha2 package; the
		// original matched v1.OperatorGroup, but no v1 package is imported here.
		for _, o := range v {
			objs = append(objs, o)
		}
	case []v1alpha1.ClusterServiceVersion:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []v1alpha1.InstallPlan:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []v1alpha1.Subscription:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []v1alpha1.CatalogSource:
		for _, o := range v {
			objs = append(objs, o)
		}
	case []v1alpha2.OperatorGroup:
		for _, o := range v {
			objs = append(objs, o)
		}
	default:
		// Could be a single resource; let the referencer decide whether it's a valid OLM kind.
		objs = append(objs, obj)
	}

	set := make(ReferenceSet)
	for _, o := range objs {
		ref, err := referencer.ObjectReferenceFor(o)
		if err != nil {
			return nil, err
		}
		// NOTE(review): ReferenceSet is keyed by *ObjectReference, so entries are
		// distinguished by pointer identity — confirm value-keyed semantics aren't needed.
		set[ref] = struct{}{}
	}
	return set, nil
}
```
- Add an `ObjectReferencer` field to the [catalog-operator](https://github.com/operator-framework/operator-lifecycle-manager/blob/22691a771a330fc05608a7ec1516d31a17a13ded/pkg/controller/operators/catalog/operator.go#L58)
```go
package catalog
import (
// ...
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators"
// ...
)
// ...
type Operator struct {
// ...
referencer operators.ObjectReferencer
}
// ...
func NewOperator(kubeconfigPath string, logger *logrus.Logger, wakeupInterval time.Duration, configmapRegistryImage, operatorNamespace string, watchedNamespaces ...string) (*Operator, error) {
// ...
op := &Operator{
// ...
referencer: operators.ObjectReferencerFunc(operators.OperatorsObjectReferenceFor),
}
// ...
}
// ...
```
- Generate `ObjectReference`s in [ensureInstallPlan(...)](https://github.com/operator-framework/operator-lifecycle-manager/blob/22691a771a330fc05608a7ec1516d31a17a13ded/pkg/controller/operators/catalog/operator.go#L804)
```go
func (o *Operator) ensureInstallPlan(logger *logrus.Entry, namespace string, subs []*v1alpha1.Subscription, installPlanApproval v1alpha1.Approval, steps []*v1alpha1.Step) (*corev1.ObjectReference, error) {
// ...
for _, installPlan := range installPlans {
if installPlan.Status.CSVManifestsMatch(steps) {
logger.Infof("found InstallPlan with matching manifests: %s", installPlan.GetName())
return o.referencer.ObjectReferenceFor(installPlan)
}
}
// ...
}
```
Write to `SubscriptionStatus.InstallPlan` and `SubscriptionStatus.InstallPlanRef`:
- Generate `ObjectReference`s in [createInstallPlan(...)](https://github.com/operator-framework/operator-lifecycle-manager/blob/22691a771a330fc05608a7ec1516d31a17a13ded/pkg/controller/operators/catalog/operator.go#L863)
```go
func (o *Operator) createInstallPlan(namespace string, subs []*v1alpha1.Subscription, installPlanApproval v1alpha1.Approval, steps []*v1alpha1.Step) (*corev1.ObjectReference, error) {
// ...
return o.referencer.ObjectReferenceFor(res)
}
```
- Use `ObjectReference` to populate both `SubscriptionStatus.InstallPlan` and `SubscriptionStatus.InstallPlanRef` in [updateSubscriptionStatus](https://github.com/operator-framework/operator-lifecycle-manager/blob/22691a771a330fc05608a7ec1516d31a17a13ded/pkg/controller/operators/catalog/operator.go#L774)
```go
func (o *Operator) updateSubscriptionStatus(namespace string, subs []*v1alpha1.Subscription, installPlanRef *corev1.ObjectReference) error {
// ...
for _, sub := range subs {
// ...
if installPlanRef != nil {
sub.Status.InstallPlanRef = installPlanRef
sub.Status.Install = v1alpha1.NewInstallPlanReference(installPlanRef)
sub.Status.State = v1alpha1.SubscriptionStateUpgradePending
}
// ...
}
// ...
}
```
Phase in orthogonal `SubscriptionStatus` condition updates (pick a condition type to start with):
- Pick `SubscriptionCatalogSourcesUnhealthy`
- Add `SubscriptionCondition` getter and setter helper methods to `SubscriptionStatus`
```go
// GetCondition returns the SubscriptionCondition of the given type if it exists in the SubscriptionStatus' Conditions; returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
func (status SubscriptionStatus) GetCondition(conditionType SubscriptionConditionType) SubscriptionCondition {
for _, cond := range status.Conditions {
if cond.Type == conditionType {
return cond
}
}
// Not found: synthesize a default condition of the requested type so callers
// can mutate and re-set it without nil checks.
return SubscriptionCondition{
Type: conditionType,
Status: corev1.ConditionUnknown,
// ...
}
}
// SetCondition sets the given SubscriptionCondition in the SubscriptionStatus'
// Conditions, replacing an existing condition of the same type or appending otherwise.
// Fixed: a pointer receiver is required so the mutation (in particular the
// append) is visible to the caller; the original value receiver discarded it.
func (status *SubscriptionStatus) SetCondition(condition SubscriptionCondition) {
	for i, cond := range status.Conditions {
		if cond.Type == condition.Type {
			// Fixed: replace the stored element; the original indexed the loop
			// copy (`cond[i] = condition`), which does not compile.
			status.Conditions[i] = condition
			return
		}
	}
	status.Conditions = append(status.Conditions, condition)
}
```
- Add a `ReferenceSetBuilder` field to the [catalog-operator](https://github.com/operator-framework/operator-lifecycle-manager/blob/22691a771a330fc05608a7ec1516d31a17a13ded/pkg/controller/operators/catalog/operator.go#L58)
```go
package catalog
import (
// ...
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators"
// ...
)
// ...
type Operator struct {
// ...
referenceSetBuilder operators.ReferenceSetBuilder
}
// ...
func NewOperator(kubeconfigPath string, logger *logrus.Logger, wakeupInterval time.Duration, configmapRegistryImage, operatorNamespace string, watchedNamespaces ...string) (*Operator, error) {
// ...
op := &Operator{
// ...
referenceSetBuilder: operators.ReferenceSetBuilderFunc(operators.BuildOperatorsReferenceSet),
}
// ...
}
// ...
```
- Define a new `CatalogSource` sync function that checks the health of a given `CatalogSource` and the health of every `CatalogSource` in its namespace and the global namespace and updates all `Subscription`s that have visibility on it with the condition state
```go
// syncSubscriptionCatalogStatus generates a SubscriptionCatalogStatus for a CatalogSource and updates the
// status of all Subscriptions in its namespace; for CatalogSources in the global catalog namespace, Subscriptions
// in all namespaces are updated.
func (o *Operator) syncSubscriptionCatalogStatus(obj interface{}) (syncError error) {
	catsrc, ok := obj.(*v1alpha1.CatalogSource)
	if !ok {
		o.Log.Debugf("wrong type: %#v", obj)
		return fmt.Errorf("casting CatalogSource failed")
	}
	logger := o.Log.WithFields(logrus.Fields{
		"catsrc":    catsrc.GetName(),
		"namespace": catsrc.GetNamespace(),
		"id":        queueinformer.NewLoopID(),
	})
	logger.Debug("syncing subscription catalogsource status")

	// Generate an ObjectReference for the CatalogSource being synced.
	// Fixed: ObjectReferenceFor returns (ref, error), and the receiver is o, not a.
	ref, err := o.referencer.ObjectReferenceFor(catsrc)
	if err != nil {
		syncError = err
		return
	}

	// Get SubscriptionCatalogStatus
	// Fixed: key the source by the synced CatalogSource itself; the original
	// referenced undeclared owner/metaObj variables.
	sourceKey := resolver.CatalogKey{Name: catsrc.GetName(), Namespace: catsrc.GetNamespace()}
	status := o.getSubscriptionCatalogStatus(logger, sourceKey, ref)

	// Update the status of all Subscriptions that can view this CatalogSource
	// Fixed: updateSubscriptionCatalogStatus is a method on Operator.
	syncError = o.updateSubscriptionCatalogStatus(logger, status)
	return
}
// getSubscriptionCatalogStatus gets the SubscriptionCatalogStatus for a given SourceKey and ObjectReference.
func (o *Operator) getSubscriptionCatalogStatus(logger logrus.Entry, sourceKey resolver.SourceKey, *corev1.ObjectReference) *v1alpha1.SubscriptionCatalogStatus {
// TODO: Implement this
}
// updateSubscriptionCatalogStatus updates all Subscriptions in the CatalogSource namespace with the given SubscriptionCatalogStatus;
// for CatalogSources in the global catalog namespace, Subscriptions in all namespaces are updated.
// NOTE(review): the status parameter is a bare SubscriptionCatalogStatus while
// getSubscriptionCatalogStatus returns *v1alpha1.SubscriptionCatalogStatus — confirm the intended type.
func (o *Operator) updateSubscriptionCatalogStatus(logger logrus.Entry, status SubscriptionCatalogStatus) error {
// TODO: Implement this. It should handle removing CatalogStatus entries to non-existent CatalogSources.
}
```
- Define a new `Subscription` sync function that checks the `CatalogStatus` field and sets `SubscriptionCondition`s relating to `CatalogSource` status
```go
// syncSubscriptionCatalogConditions inspects a Subscription's CatalogStatus entries and
// sets the SubscriptionCatalogSourcesUnhealthy condition accordingly.
func (o *Operator) syncSubscriptionCatalogConditions(obj interface{}) (syncError error) {
	sub, ok := obj.(*v1alpha1.Subscription)
	if !ok {
		o.Log.Debugf("wrong type: %#v", obj)
		return fmt.Errorf("casting Subscription failed")
	}
	logger := o.Log.WithFields(logrus.Fields{
		"sub":       sub.GetName(),
		"namespace": sub.GetNamespace(),
		"id":        queueinformer.NewLoopID(),
	})
	logger.Debug("syncing subscription catalogsource conditions")

	// Get the list of CatalogSources visible to the Subscription
	catsrcs, err := o.listResolvableCatalogSources(sub.GetNamespace())
	if err != nil {
		logger.WithError(err).Warn("could not list resolvable catalogsources")
		syncError = err
		return
	}

	// Build reference set from resolvable catalogsources
	refSet, err := o.referenceSetBuilder.Build(catsrcs)
	if err != nil {
		logger.WithError(err).Warn("could not build object reference set of resolvable catalogsources")
		syncError = err
		return
	}

	// Defer an update to the Subscription
	out := sub.DeepCopy()
	defer func() {
		// TODO: Implement update SubscriptionStatus using out if syncError == nil and Subscription has changed
	}()

	// Update CatalogSource related CatalogSourceConditions.
	// Fixed: both counts were plain assignments to undeclared variables; use short declarations.
	currentSources := len(refSet)
	knownSources := len(sub.Status.CatalogStatus)

	// unhealthyUpdated is set to true when a change has been made to the condition of type SubscriptionCatalogSourcesUnhealthy
	unhealthyUpdated := false
	// TODO: Add flags for other condition types
	if currentSources > knownSources {
		// Flip SubscriptionCatalogSourcesUnhealthy to "Unknown"
		condition := out.Status.GetCondition(v1alpha1.SubscriptionCatalogSourcesUnhealthy)
		condition.Status = corev1.ConditionUnknown
		condition.Reason = "MissingCatalogInfo"
		condition.Message = fmt.Sprintf("info on health of %d/%d catalogsources not yet known", currentSources-knownSources, currentSources)
		condition.LastSync = timeNow()
		out.Status.SetCondition(condition)
		unhealthyUpdated = true
	}

	// TODO: Add flags for other condition types to loop predicate
	for i := 0; !unhealthyUpdated && i < knownSources; i++ {
		// Fixed: examine the i-th catalog status entry; the original read an
		// undeclared sub.Status.CatalogSources without indexing.
		status := sub.Status.CatalogStatus[i]
		if status.CatalogSourceRef == nil {
			// Flip SubscriptionCatalogSourcesUnhealthy to "Unknown"
			condition := out.Status.GetCondition(v1alpha1.SubscriptionCatalogSourcesUnhealthy)
			condition.Status = corev1.ConditionUnknown
			condition.Reason = "CatalogInfoInvalid"
			condition.Message = "info missing reference to catalogsource"
			condition.LastSync = timeNow()
			out.Status.SetCondition(condition)
			unhealthyUpdated = true
			break
		}
		// NOTE(review): refSet is keyed by *ObjectReference, so this lookup is by
		// pointer identity and will not match references built from a fresh list —
		// confirm the set should be keyed by value instead.
		if _, ok := refSet[status.CatalogSourceRef]; !ok {
			// Flip SubscriptionCatalogSourcesUnhealthy to "Unknown"
			condition := out.Status.GetCondition(v1alpha1.SubscriptionCatalogSourcesUnhealthy)
			condition.Status = corev1.ConditionUnknown
			condition.Reason = "CatalogInfoInconsistent"
			// Fixed: the original referenced an undeclared ref; use the entry's reference.
			condition.Message = fmt.Sprintf("info found for non-existent catalogsource %s/%s", status.CatalogSourceRef.Name, status.CatalogSourceRef.Namespace)
			condition.LastSync = timeNow()
			out.Status.SetCondition(condition)
			unhealthyUpdated = true
			break
		}
		// Fixed: health is tracked on the status entry itself; ObjectReference has
		// no Healthy field — confirm the field name on the catalog status type.
		if !status.Healthy {
			// Flip SubscriptionCatalogSourcesUnhealthy to "True"
			condition := out.Status.GetCondition(v1alpha1.SubscriptionCatalogSourcesUnhealthy)
			condition.Status = corev1.ConditionTrue
			condition.Reason = "CatalogSourcesUnhealthy"
			condition.Message = "one or more visible catalogsources are unhealthy"
			condition.LastSync = timeNow()
			out.Status.SetCondition(condition)
			unhealthyUpdated = true
			break
		}
		// TODO: Set any other conditions relating to the CatalogSource status
	}

	if !unhealthyUpdated {
		// Flip SubscriptionCatalogSourcesUnhealthy to "False"
		condition := out.Status.GetCondition(v1alpha1.SubscriptionCatalogSourcesUnhealthy)
		condition.Status = corev1.ConditionFalse
		condition.Reason = "CatalogSourcesHealthy"
		condition.Message = "all catalogsources are healthy"
		condition.LastSync = timeNow()
		out.Status.SetCondition(condition)
		unhealthyUpdated = true
	}
	// Fixed: a function with named results must end with a return statement.
	return
}
// listResolvableCatalogSources returns a list of the CatalogSources that can be used in resolution for a Subscription in the given namespace.
// NOTE(review): returns value (non-pointer) CatalogSources, which matches the
// []v1alpha1.CatalogSource case handled by BuildOperatorsReferenceSet.
func (o *Operator) listResolvableCatalogSources(namespace string) ([]v1alpha1.CatalogSource, error) {
// TODO: Implement this. Should be the union of CatalogSources in the given namespace and the global catalog namespace.
}
```
- Register new [QueueIndexer](https://github.com/operator-framework/operator-lifecycle-manager/blob/a88f5349eb80da2367b00a5191c0a7b50074f331/pkg/lib/queueinformer/queueindexer.go#L14)s with separate workqueues for handling `syncSubscriptionCatalogStatus` and `syncSubscriptionCatalogConditions` to the [catalog-operator](https://github.com/operator-framework/operator-lifecycle-manager/blob/22691a771a330fc05608a7ec1516d31a17a13ded/pkg/controller/operators/catalog/operator.go#L58). Use the same cache feeding other respective workqueues.
```go
package catalog
// ...
type Operator struct {
// ...
subscriptionCatalogStatusIndexer *queueinformer.QueueIndexer
subscriptionStatusIndexer *queueinformer.QueueIndexer
}
// ...
func NewOperator(kubeconfigPath string, logger *logrus.Logger, wakeupInterval time.Duration, configmapRegistryImage, operatorNamespace string, watchedNamespaces ...string) (*Operator, error) {
// ...
// Register separate queue for syncing SubscriptionStatus from CatalogSource updates
subCatStatusQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "subCatStatus")
subCatQueueIndexer := queueinformer.NewQueueIndexer(subCatStatusQueue, op.catsrcIndexers, op.syncSubscriptionCatalogStatus, "subCatStatus", logger, metrics.NewMetricsNil())
op.RegisterQueueIndexer(subCatQueueIndexer)
op.subscriptionCatalogStatusIndexer = subCatQueueIndexer
// ...
// Register separate queue for syncing SubscriptionStatus
subStatusQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "subStatus")
subQueueIndexer := queueinformer.NewQueueIndexer(subStatusQueue, op.subIndexers, op.syncSubscriptionCatalogConditions, "subStatus", logger, metrics.NewMetricsNil())
op.RegisterQueueIndexer(subQueueIndexer)
op.subscriptionStatusIndexer = subQueueIndexer
// ...
}
// ...
```

View File

@ -0,0 +1,71 @@
## Requirement
Allow cluster administrator to specify a service account for an operator group so that all operator(s) associated with this operator group are deployed and run against the privileges granted to the service account.
`APIService` and `CustomResourceDefinition` will always be created by `OLM` using the `cluster-admin` role. The service account(s) associated with operator group(s) should never be granted privileges to write these resources.
If the specified service account does not have enough permission(s) for an operator that is being installed, useful and contextual information should be added to the status of the respective resource(s) so that it is easy for the administrator to troubleshoot and resolve the issue.
## Scenarios:
* Administrator creates a new operator group and specifies a service account. All operator(s) associated with this operator group are installed and run against the privileges granted to the service account.
* Administrator creates a new operator group and does not specify any service account. We will maintain backward compatibility. Same behavior as today.
* Existing operator group(s) (no service account is specified): We will maintain backward compatibility, same behavior as today.
* Administrator updates an existing operator group and specifies a service account. We can be permissive and allow the existing operator(s) to continue to run with their current privileges. When such an existing operator is going through an upgrade it should be reinstalled and run against the privileges granted to the service account like any new operator(s).
* The service account changes - permission may be added or taken away. Or existing service account is swapped with a new one.
* The administrator removes the service account from the operator group.
* The administrator has an untrusted operator and wants to run it with much less privileges than what the service account in the operator group allows.
## Scope
This feature will be implemented in phases. Phase 1 is scoped at:
* While creating permissions for an operator, use the service account specified in the operator group. This will ensure that the operator install will fail if it asks for a privilege not granted to the service account.
* The deployment of the operator(s) are carried out using the client bound to `cluster-admin` role granted to OLM. We are going to use a scoped client bound to the service account for deployment(s).
The following are not in scope for phase 1:
* We currently use `rbac authorizer` in `OLM` to check permission status. We are not introducing any change to `permissionStatus` function in this phase. In the future we can look into removing `rbac authorizer` from `OLM`. An alternate and more maintainable solution could be to use `SelfSubjectAccessReview` with a client bound to the service account of the operator.
## Proposed Changes
As part of the first phase, we propose the following changes:
* During reconciliation of `OperatorGroup` resource(s), if a service account is specified then:
* Make sure the service account exists.
* Update the Status of `OperatorGroup` with a reference to the `ServiceAccount`.
`OperatorGroupSpec` already has an attribute `ServiceAccount`. So the specification of `OperatorGroup` will not change. Also, we expect the `ServiceAccount` object to be in the same namespace as the `OperatorGroup`.
Add a new field in `OperatorGroupStatus` to refer to the resolved service account.
```go
ServiceAccountRef *corev1.ObjectReference `json:"serviceAccountRef,omitempty"`
```
* Add ability to create a client that is bound to the bearer token of the service account specified in the operator group.
* While creating `(Cluster)Role`, `(Cluster)RoleBinding` object(s) for an operator being installed, use the client crafted above so that it is confined to the privileges granted to the service account specified in the operator group. `installPlanTransitioner.ExecutePlan` function is responsible for creating these role(s). Here is how we get access to the `OperatorGroup`:
```go
func (o *Operator) ExecutePlan(plan *v1alpha1.InstallPlan) error {
...
// The operator group must be in the same namespace as the Installplan.
// 1. List all OperatorGroup resource(s) in the same namespace as Installplan.
list, err := lister.OperatorsV1().OperatorGroupLister().OperatorGroups(plan.GetNamespace()).List(labels.Everything())
// Although we expect one OperatorGroup in a namespace, we should be defensive.
// 2. Filter the list:
if len(Status.Namespaces) == 0 {
// Remove from the list.
}
// If the resulting list has more than one OperatorGroup treat it as an error condition.
}
```
* The `InstallPlan` status will reflect the error(s) encountered if `OLM` fails to create the roles.
### How to build a client bound to a service account:
`InClusterConfig` attaches a bearer token to the `rest.Config` object returned. See https://github.com/kubernetes/client-go/blob/master/rest/config.go#L399. We can do the following to create a client that binds to a service account:
* Call `InClusterConfig` to create a `rest.Config` bound to the POD's `serviceaccount`.
* Use `AnonymousClientConfig` function to copy the `rest.Config` without the bearer token. https://github.com/kubernetes/client-go/blob/master/rest/config.go#L491
* Set `BearerToken` from the secret associated with the service account.

View File

@ -0,0 +1,46 @@
# Access Control Philosophy
The [architecture][arch] is designed around a number of CRDs that ensure that the main personas of your clusters, the cluster admins and end users, have the appropriate permissions to get their jobs done while maintaining a degree of access control.
Using CRDs for this allows for default roles to be modeled using Kubernetes RBAC, which integrates into the wide variety of community tools like `kubectl` as well as the API server's audit log.
## End Users
End users are the engineers, operations and manager staff that utilize the cluster to run applications. OLM is designed to facilitate the installation and management of Operator instances in a self-service manner within a namespace.
Running an Operator manually requires access to cluster-level permissions, which end users don't typically have. Here's a typical list of tasks required:
1. Create Service Account for Operator
1. Create minimal Role for the Operator
1. Create Role Binding for Role and Service Account
1. Create the Custom Resource Definition
1. Create Operator Deployment, referencing the Service Account
1. Create an instance of the custom resource within a namespace
1. Operator uses Service Account to create the app resources (Deployments, Pods, etc)
In order to both ensure self-service _and_ minimal permissions, OLM generates these cluster-level resources on behalf of the end user, in a manner that is safe and auditable. Once an admin has installed/granted access to an Operator (see below), the end user only needs to:
1. Create an instance of the custom resource within a namespace
1. Operator uses Service Account to create the app resources (Deployments, Pods, etc)
As you can see, no cluster permissions are needed.
## Cluster Admins
Cluster admins have the ability to provide a selection of Operators for use on the cluster. These Operators are described in a Cluster Service Version (CSV) file which resides in a CatalogSource (along with the Operator's CRD and package manifests). The cluster admin can now select the teams and namespaces that can use this particular Operator, by creating a Subscription object, which will trigger the creation of an InstallPlan that points to a specific CatalogSource. Once the InstallPlan is approved, the OLM software is responsible for parsing the CatalogSource and performing the following:
1. Create the Custom Resource Definition
1. Create Service Account for Operator
1. Create minimal Role or ClusterRole for the Operator
1. Create Role or ClusterRoleBinding for Role or ClusterRole and Service Account
1. Create Operator Deployment, referencing the Service Account
Once a namespace is created, the end-users now have the ability to create instances of the Custom Resource in a self-service manner (see above). OLM also has the ability to control automatic updates of the Operators running in namespaces. See the [architecture][arch] for more details.
## Invent Your Own Personas
OLM uses standard Kubernetes RBAC so that admins can create customized personas in addition to the methods described above. For example, if you want to allow a larger group of namespace admins to subscribe to various Operators without being a cluster admin, they can be granted access to CRUD on Subscription objects.
If you want your end-users to be able to install CSVs themselves, they can be granted access to CSVs and Subscriptions. This is typically done when you are producing Operators as part of your product or internal platform.
[arch]: architecture.md

View File

@ -0,0 +1,137 @@
# Architecture
OLM is composed of two Operators: the OLM Operator and the Catalog Operator.
Each of these Operators are responsible for managing the CRDs that are the basis for the OLM framework:
| Resource | Short Name | Owner | Description |
|--------------------------|------------|---------|--------------------------------------------------------------------------------------------|
| ClusterServiceVersion | csv | OLM | application metadata: name, version, icon, required resources, installation, etc... |
| InstallPlan | ip | Catalog | calculated list of resources to be created in order to automatically install/upgrade a CSV |
| CatalogSource | catsrc | Catalog | a repository of CSVs, CRDs, and packages that define an application |
| Subscription | sub | Catalog | used to keep CSVs up to date by tracking a channel in a package |
| OperatorGroup | og | OLM | used to group multiple namespaces and prepare for use by an operator |
Each of these Operators are also responsible for creating resources:
| Operator | Creatable Resources |
|----------|----------------------------|
| OLM | Deployment |
| OLM | Service Account |
| OLM | (Cluster)Roles |
| OLM | (Cluster)RoleBindings |
| Catalog | Custom Resource Definition |
| Catalog | ClusterServiceVersion |
## What is a ClusterServiceVersion
ClusterServiceVersion combines metadata and runtime information about a service that allows OLM to manage it.
ClusterServiceVersion:
- Metadata (name, description, version, links, labels, icon, etc)
- Install strategy
- Type: Deployment
- Set of service accounts / required permissions
- Set of deployments
- CRDs
- Type
- Owned - managed by this service
- Required - must exist in the cluster for this service to run
- Resources - a list of k8s resources that the Operator interacts with
- Descriptors - annotate CRD spec and status fields to provide semantic information
## OLM Operator
The OLM Operator is responsible for deploying applications defined by ClusterServiceVersion resources once the required resources specified in the ClusterServiceVersion are present in the cluster.
The OLM Operator is not concerned with the creation of the required resources; users can choose to manually create these resources using `kubectl` or users can choose to create these resources using the Catalog Operator.
This separation of concern enables users incremental buy-in in terms of how much of the OLM framework they choose to leverage for their application.
While the OLM Operator is often configured to watch all namespaces, it can also be operated alongside other OLM Operators so long as they all manage separate namespaces.
### ClusterServiceVersion Control Loop
```
+------------------------------------------------------+
| |
| +--> Succeeded -+
v | |
None --> Pending --> InstallReady --> Installing -| |
^ +--> Failed <--+
| |
+----------------------------------------------+
\ /
+---------------------------------------------------------------+
|
v
Replacing --> Deleting
```
| Phase | Description |
|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| None | initial phase, once seen by the Operator, it is immediately transitioned to `Pending` |
| Pending | requirements in the CSV are not met, once they are this phase transitions to `Installing` |
| InstallReady | all requirements in the CSV are present, the Operator will begin executing the install strategy |
| Installing | the install strategy is being executed and resources are being created, but not all components are reporting as ready |
| Succeeded | the execution of the Install Strategy was successful; if requirements disappear, or an APIService cert needs to be rotated this may transition back to `Pending`; if an installed component disappears this may transition to `Failed`|
| Failed | upon failed execution of the Install Strategy, or when an installed component disappears, the CSV transitions to this phase; if the component can be recreated by OLM, this may transition to `Pending` |
| Replacing | a newer CSV that replaces this one has been discovered in the cluster. This status means the CSV is marked for GC |
| Deleting | the GC loop has determined this CSV is safe to delete from the cluster. It will disappear soon. |
> Note: In order to transition, a CSV must first be an active member of an OperatorGroup
## Catalog Operator
The Catalog Operator is responsible for resolving and installing ClusterServiceVersions and the required resources they specify. It is also responsible for watching catalog sources for updates to packages in channels, and upgrading them (optionally automatically) to the latest available versions.
A user that wishes to track a package in a channel creates a Subscription resource configuring the desired package, channel, and the catalog source from which to pull updates. When updates are found, an appropriate InstallPlan is written into the namespace on behalf of the user.
Users can also create an InstallPlan resource directly, containing the names of the desired ClusterServiceVersions and an approval strategy and the Catalog Operator will create an execution plan for the creation of all of the required resources.
Once approved, the Catalog Operator will create all of the resources in an InstallPlan; this should then independently satisfy the OLM Operator, which will proceed to install the ClusterServiceVersions.
### InstallPlan Control Loop
```
None --> Planning +------>------->------> Installing --> Complete
| ^
v |
+--> RequiresApproval --+
```
| Phase | Description |
|------------------|------------------------------------------------------------------------------------------------|
| None | initial phase, once seen by the Operator, it is immediately transitioned to `Planning` |
| Planning | dependencies between resources are being resolved, to be stored in the InstallPlan `Status` |
| RequiresApproval | occurs when using manual approval, will not transition phase until `approved` field is true |
| Installing | resolved resources in the InstallPlan `Status` block are being created |
| Complete | all resolved resources in the `Status` block exist |
### Subscription Control Loop
```
None --> UpgradeAvailable --> UpgradePending --> AtLatestKnown -+
^ | |
| v v
+----------<---------------<--------+---------<--------+
```
| Phase | Description |
|------------------|---------------------------------------------------------------------------------------------------------------|
| None | initial phase, once seen by the Operator, it is immediately transitioned to `UpgradeAvailable` |
| UpgradeAvailable | catalog contains a CSV which replaces the `status.installedCSV`, but no `InstallPlan` has been created yet |
| UpgradePending | `InstallPlan` has been created (referenced in `status.installplan`) to install a new CSV |
| AtLatestKnown | `status.installedCSV` matches the latest available CSV in catalog |
## Catalog (Registry) Design
The Catalog Registry stores CSVs and CRDs for creation in a cluster, and stores metadata about packages and channels.
A package manifest is an entry in the catalog registry that associates a package identity with sets of ClusterServiceVersions. Within a package, channels point to a particular CSV. Because CSVs explicitly reference the CSV that they replace, a package manifest provides the catalog Operator all of the information that is required to update a CSV to the latest version in a channel (stepping through each intermediate version).
```
Package {name}
|
+-- Channel {name} --> CSV {version} (--> CSV {version - 1} --> ...)
|
+-- Channel {name} --> CSV {version}
|
+-- Channel {name} --> CSV {version}
```

View File

@ -0,0 +1,377 @@
# Building a Cluster Service Version (CSV) for the Operator Framework
This guide is intended to guide an Operator author to package a version of their Operator to run with the [Operator Lifecycle Manager](https://github.com/operator-framework/operator-lifecycle-manager). This will be a manual method that will walk through each section of the file, what it's used for and how to populate it.
## What is a Cluster Service Version (CSV)?
A CSV is the metadata that accompanies your Operator container image. It can be used to populate user interfaces with info like your logo/description/version and it is also a source of technical information needed to run the Operator, like the RBAC rules it requires and which Custom Resources it manages or depends on.
The Lifecycle Manager will parse this and do all of the hard work to wire up the correct Roles and Role Bindings, ensure that the Operator is started (or updated) within the desired namespace and check for various other requirements, all without the end users having to do anything.
You can read about the [full architecture in more detail](architecture.md#what-is-a-clusterserviceversion).
## CSV Metadata
The object has the normal Kubernetes metadata. Since the CSV pertains to the specific version, the naming scheme is the name of the Operator + the semantic version number, eg `mongodboperator.v0.3`.
The namespace is used when a CSV will remain private to a specific namespace. Only users of that namespace will be able to view or instantiate the Operator. If you plan on distributing your Operator to many namespaces or clusters, you may want to explore bundling it into a [Catalog](architecture.md#catalog-registry-design).
The namespace listed in the CSV within a catalog is actually a placeholder, so it is common to simply list `placeholder`. Otherwise, loading a CSV directly into a namespace requires that namespace, of course.
```yaml
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
name: mongodboperator.v0.3
namespace: placeholder
```
## Your Custom Resource Definitions
There are two types of CRDs that your Operator may use, ones that are “owned” by it and ones that it depends on, which are “required”.
### Owned CRDs
The CRDs owned by your Operator are the most important part of your CSV. This establishes the link between your Operator and the required RBAC rules, dependency management and other under-the-hood Kubernetes concepts.
It's common for your Operator to use multiple CRDs to link together concepts, such as top-level database configuration in one object and a representation of replica sets in another. List out each one in the CSV file.
**DisplayName**: A human readable version of your CRD name, eg. “MongoDB Standalone”
**Description**: A short description of how this CRD is used by the Operator or a description of the functionality provided by the CRD.
**Group**: The API group that this CRD belongs to, eg. database.example.com
**Kind**: The machine readable name of your CRD
**Name**: The full name of your CRD
The next two sections require more explanation.
**Resources**:
Your CRDs will own one or more types of Kubernetes objects. These are listed in the resources section to inform your end-users of the objects they might need to troubleshoot or how to connect to the application, such as the Service or Ingress rule that exposes a database.
It's recommended to only list out the objects that are important to a human, not an exhaustive list of everything you orchestrate. For example, ConfigMaps that store internal state that shouldn't be modified by a user shouldn't appear here.
**SpecDescriptors, StatusDescriptors, and ActionDescriptors**:
These are a way to hint UIs with certain inputs or outputs of your Operator that are most important to an end user. If your CRD contains the name of a Secret or ConfigMap that the user must provide, you can specify that here. These items will be linked and highlighted in compatible UIs.
There are three types of descriptors:
***SpecDescriptors***: A reference to fields in the `spec` block of an object.
***StatusDescriptors***: A reference to fields in the `status` block of an object.
***ActionDescriptors***: A reference to actions that can be performed on an object.
All Descriptors accept the following fields:
**DisplayName**: A human readable name for the Spec, Status, or Action.
**Description**: A short description of the Spec, Status, or Action and how it is used by the Operator.
**Path**: A dot-delimited path of the field on the object that this descriptor describes.
**X-Descriptors**: Used to determine which "capabilities" this descriptor has and which UI component to use. A canonical list of React UI X-Descriptors for OpenShift can be found [here](https://github.com/openshift/console/blob/master/frontend/public/components/operator-lifecycle-manager/descriptors/types.ts#L5-L26).
More information on Descriptors can be found [here](https://github.com/openshift/console/tree/master/frontend/public/components/operator-lifecycle-manager/descriptors).
Below is an example of a MongoDB “standalone” CRD that requires some user input in the form of a Secret and ConfigMap, and orchestrates Services, StatefulSets, Pods and ConfigMaps.
```yaml
- displayName: MongoDB Standalone
group: mongodb.com
kind: MongoDbStandalone
name: mongodbstandalones.mongodb.com
resources:
- kind: Service
name: ''
version: v1
- kind: StatefulSet
name: ''
version: v1beta2
- kind: Pod
name: ''
version: v1
- kind: ConfigMap
name: ''
version: v1
specDescriptors:
- description: Credentials for Ops Manager or Cloud Manager.
displayName: Credentials
path: credentials
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret'
- description: Project this deployment belongs to.
displayName: Project
path: project
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:ConfigMap'
- description: MongoDB version to be installed.
displayName: Version
path: version
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:label'
statusDescriptors:
- description: The status of each of the Pods for the MongoDB cluster.
displayName: Pod Status
path: pods
x-descriptors:
- 'urn:alm:descriptor:com.tectonic.ui:podStatuses'
version: v1
description: >-
MongoDB Deployment consisting of only one host. No replication of
data.
```
### Required CRDs
Relying on other “required” CRDs is completely optional and only exists to reduce the scope of individual Operators and provide a way to compose multiple Operators together to solve an end-to-end use case. An example of this is an Operator that might set up an application and install an etcd cluster (from an etcd Operator) to use for distributed locking and a Postgres database (from a Postgres Operator) for data storage.
The Lifecycle Manager will check against the available CRDs and Operators in the cluster to fulfill these requirements. If suitable versions are found, the Operators will be started within the desired namespace and a Service Account created for each Operator to create/watch/modify the Kubernetes resources required.
**Name**: The full name of the CRD you require
**Version**: The version of that object API
**Kind**: The Kubernetes object kind
**DisplayName**: A human readable version of the CRD
**Description**: A summary of how the component fits in your larger architecture
```yaml
required:
- name: etcdclusters.etcd.database.coreos.com
version: v1beta2
kind: EtcdCluster
displayName: etcd Cluster
description: Represents a cluster of etcd nodes.
```
## CRD Templates
Users of your Operator will need to be aware of which options are required vs optional. You can provide templates for each of your CRDs with a minimum set of configuration as an annotation named `alm-examples`. Compatible UIs will pre-enter this template for users to further customize.
The annotation consists of a list of the `kind`, eg. the CRD name, and the corresponding `metadata` and `spec` of the Kubernetes object. Here's a full example that provides templates for `EtcdCluster`, `EtcdBackup` and `EtcdRestore`:
```yaml
metadata:
annotations:
alm-examples: >-
[{"apiVersion":"etcd.database.coreos.com/v1beta2","kind":"EtcdCluster","metadata":{"name":"example","namespace":"default"},"spec":{"size":3,"version":"3.2.13"}},{"apiVersion":"etcd.database.coreos.com/v1beta2","kind":"EtcdRestore","metadata":{"name":"example-etcd-cluster"},"spec":{"etcdCluster":{"name":"example-etcd-cluster"},"backupStorageType":"S3","s3":{"path":"<full-s3-path>","awsSecret":"<aws-secret>"}}},{"apiVersion":"etcd.database.coreos.com/v1beta2","kind":"EtcdBackup","metadata":{"name":"example-etcd-cluster-backup"},"spec":{"etcdEndpoints":["<etcd-cluster-endpoints>"],"storageType":"S3","s3":{"path":"<full-s3-path>","awsSecret":"<aws-secret>"}}}]
```
## Your API Services
As with CRDs, there are two types of APIServices that your Operator may use, “owned” and "required".
### Owned APIServices
When a CSV owns an APIService, it is responsible for describing the deployment of the extension api-server that backs it and the group-version-kinds it provides.
An APIService is uniquely identified by the group-version it provides and can be listed multiple times to denote the different kinds it is expected to provide.
**DisplayName**: A human readable version of your APIService name, eg. “MongoDB Standalone”
**Description**: A short description of how this APIService is used by the Operator or a description of the functionality provided by the APIService.
**Group**: Group that the APIService provides, eg. database.example.com .
**Version**: Version of the APIService, eg v1alpha1
**Kind**: A kind that the APIService is expected to provide.
**DeploymentName**:
Name of the deployment defined by your CSV that corresponds to your APIService (required for owned APIServices). During the CSV pending phase, the OLM Operator will search your CSV's InstallStrategy for a deployment spec with a matching name, and if not found, will not transition the CSV to the install ready phase.
**Resources**:
Your APIServices will own one or more types of Kubernetes objects. These are listed in the resources section to inform your end-users of the objects they might need to troubleshoot or how to connect to the application, such as the Service or Ingress rule that exposes a database.
It's recommended to only list out the objects that are important to a human, not an exhaustive list of everything you orchestrate. For example, ConfigMaps that store internal state that shouldn't be modified by a user shouldn't appear here.
**SpecDescriptors, StatusDescriptors, and ActionDescriptors**:
Essentially the same as for owned CRDs.
### APIService Resource Creation
The Lifecycle Manager is responsible for creating or replacing the Service and APIService resources for each unique owned APIService.
* Service pod selectors are copied from the CSV deployment matching the APIServiceDescription's DeploymentName.
* A new CA key/cert pair is generated for each installation and the base64 encoded CA bundle is embedded in the respective APIService resource.
### APIService Serving Certs
The Lifecycle Manager handles generating a serving key/cert pair whenever an owned APIService is being installed. The serving cert has a CN containing the host name of the generated Service resource and is signed by the private key of the CA bundle embedded in the corresponding APIService resource. The cert is stored as a type `kubernetes.io/tls` Secret in the deployment namespace and a Volume named "apiservice-cert" is automatically appended to the Volumes section of the deployment in the CSV matching the APIServiceDescription's `DeploymentName` field. If one does not already exist, a VolumeMount with a matching name is also appended to all containers of that deployment. This allows users to define a VolumeMount with the expected name to accommodate any custom path requirements. The generated VolumeMount's path defaults to `/apiserver.local.config/certificates` and any existing VolumeMounts with the same path are replaced.
### Required APIServices
The Lifecycle Manager will ensure all required CSVs have an APIService that is available and all expected group-version-kinds are discoverable before attempting installation. This allows a CSV to rely on specific kinds provided by APIServices it does not own.
**DisplayName**: A human readable version of your APIService name, eg. “MongoDB Standalone”
**Description**: A short description of how this APIService is used by the Operator or a description of the functionality provided by the APIService.
**Group**: Group that the APIService provides, eg. database.example.com .
**Version**: Version of the APIService, eg v1alpha1
**Kind**: A kind that the APIService is expected to provide.
## Operator Metadata
The metadata section contains general metadata around the name, version and other info that aids users in discovery of your Operator.
**DisplayName**: Human readable name that describes your Operator and the CRDs that it implements
**Keywords**: A list of categories that your Operator falls into. Used for filtering within compatible UIs.
**Provider**: The name of the publishing entity behind the Operator
**Maturity**: Level of maturity the Operator has achieved at this version, eg. planning, pre-alpha, alpha, beta, stable, mature, inactive, or deprecated.
**Version**: The semantic version of the Operator. This value should be incremented each time a new Operator image is published.
**Icon**: a base64 encoded image of the Operator logo or the logo of the publisher. The `base64data` parameter contains the data and the `mediatype` specifies the type of image, eg. `image/png` or `image/svg`.
**Links**: A list of relevant links for the Operator. Common links include documentation, how-to guides, blog posts, and the company homepage.
**Maintainers**: A list of names and email addresses of the maintainers of the Operator code. This can be a list of individuals or a shared email alias, eg. support@example.com.
**Description**: A markdown blob that describes the Operator. Important information to include: features, limitations and common use-cases for the Operator. If your Operator manages different types of installs, eg. standalone vs clustered, it is useful to give an overview of how each differs from each other, or which ones are supported for production use.
**MinKubeVersion**: A minimum version of Kubernetes that the server is supposed to have so operator(s) can be deployed. The Kubernetes version must be in "Major.Minor.Patch" format (e.g. 1.11.0).
**Labels** (optional): Any key/value pairs used to organize and categorize this CSV object.
**Selectors** (optional): A label selector to identify related resources. Set this to select on current labels applied to this CSV object (if applicable).
**InstallModes**: A set of `InstallMode`s that tell OLM which `OperatorGroup`s an Operator can belong to. Belonging to an `OperatorGroup` means that OLM provides the set of targeted namespaces as an annotation on the Operator's CSV and any deployments defined therein. These deployments can then utilize [the Downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#the-downward-api) to inject the list of namespaces into their container(s). An `InstallMode` consists of an `InstallModeType` field and a boolean `Supported` field. There are four `InstallModeTypes`:
* `OwnNamespace`: If supported, the operator can be a member of an `OperatorGroup` that selects its own namespace
* `SingleNamespace`: If supported, the operator can be a member of an `OperatorGroup` that selects one namespace
* `MultiNamespace`: If supported, the operator can be a member of an `OperatorGroup` that selects more than one namespace
* `AllNamespaces`: If supported, the operator can be a member of an `OperatorGroup` that selects all namespaces (target namespace set is the empty string "")
Here's an example:
```yaml
keywords: ['etcd', 'key value', 'database', 'coreos', 'open source']
version: 0.9.2
maturity: alpha
replaces: etcdoperator.v0.9.0
maintainers:
- name: CoreOS, Inc
email: support@coreos.com
provider:
name: CoreOS, Inc
labels:
alm-owner-etcd: etcdoperator
operated-by: etcdoperator
selector:
matchLabels:
alm-owner-etcd: etcdoperator
operated-by: etcdoperator
links:
- name: Blog
url: https://coreos.com/etcd
- name: Documentation
url: https://coreos.com/operators/etcd/docs/latest/
- name: etcd Operator Source Code
url: https://github.com/coreos/etcd-operator
icon:
- base64data: <base64-encoded-data>
mediatype: image/png
installModes:
- type: OwnNamespace
supported: true
- type: SingleNamespace
supported: true
- type: MultiNamespace
supported: false
- type: AllNamespaces
supported: true
```
## Operator Install
The install block is how the Lifecycle Manager will instantiate the Operator on the cluster. There are two subsections within install: one to describe the `deployment` that will be started within the desired namespace and one that describes the Role `permissions` required to successfully run the Operator.
Ensure that the `serviceAccountName` used in the `deployment` spec matches one of the Roles described under `permissions`.
Multiple Roles should be described to reduce the scope of any actions needed by containers that the Operator may run on the cluster. For example, if you have a component that generates a TLS Secret upon start up, a Role that allows `create` but not `list` on Secrets is more secure than using a single all-powerful Service Account.
Here's a full example:
```yaml
install:
spec:
deployments:
- name: example-operator
spec:
replicas: 1
selector:
matchLabels:
k8s-app: example-operator
template:
metadata:
labels:
k8s-app: example-operator
spec:
containers:
image: 'quay.io/example/example-operator:v0.0.1'
imagePullPolicy: Always
name: example-operator
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 50Mi
imagePullSecrets:
- name: ''
nodeSelector:
beta.kubernetes.io/os: linux
serviceAccountName: example-operator
permissions:
- serviceAccountName: example-operator
rules:
- apiGroups:
- ''
resources:
- configmaps
- secrets
- services
verbs:
- get
- list
- create
- update
- delete
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- create
- delete
- apiGroups:
- mongodb.com
resources:
- '*'
verbs:
- '*'
- serviceAccountName: example-operator-list
rules:
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
strategy: deployment
```
## Full Examples
Several [complete examples of CSV files](https://github.com/operator-framework/community-operators) are stored in Github.

Some files were not shown because too many files have changed in this diff Show More