This is an automated email from the ASF dual-hosted git repository.

astefanutti pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-k.git
commit e95c94f898f52f3070805b1bbc5632cbb88af05d
Author: Antonin Stefanutti <anto...@stefanutti.fr>
AuthorDate: Thu Sep 30 17:47:46 2021 +0200

    fix: Do not use cached client to create and retrieve build Pod
---
 pkg/cmd/operator/operator.go             | 25 +++++++++++++++++++++++++
 pkg/controller/build/build_controller.go |  4 ++--
 pkg/controller/build/build_pod.go        | 19 +++++++------------
 pkg/controller/build/initialize_pod.go   | 11 ++++++++---
 pkg/controller/build/monitor_pod.go      | 11 +++++++----
 pkg/platform/operator.go                 | 22 +---------------------
 6 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/pkg/cmd/operator/operator.go b/pkg/cmd/operator/operator.go
index 393c3d7..2013142 100644
--- a/pkg/cmd/operator/operator.go
+++ b/pkg/cmd/operator/operator.go
@@ -29,6 +29,7 @@ import (
 
 	coordination "k8s.io/api/coordination/v1"
 	corev1 "k8s.io/api/core/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -118,6 +119,10 @@ func Run(healthPort, monitoringPort int32, leaderElection bool) {
 		}
 	}
 
+	// Set the operator container image if it runs in-container
+	platform.OperatorImage, err = getOperatorImage(context.TODO(), c)
+	exitOnError(err, "cannot get operator container image")
+
 	if ok, err := kubernetes.CheckPermission(context.TODO(), c, coordination.GroupName, "leases", operatorNamespace, "", "create"); err != nil || !ok {
 		leaderElection = false
 		exitOnError(err, "cannot check permissions for creating Leases")
@@ -184,6 +189,26 @@ func getWatchNamespace() (string, error) {
 	return ns, nil
 }
 
+// getOperatorImage returns the image currently used by the running operator if present (when running out of cluster, it may be absent).
+func getOperatorImage(ctx context.Context, c ctrl.Reader) (string, error) {
+	ns := platform.GetOperatorNamespace()
+	name := platform.GetOperatorPodName()
+	if ns == "" || name == "" {
+		return "", nil
+	}
+
+	pod := corev1.Pod{}
+	if err := c.Get(ctx, ctrl.ObjectKey{Namespace: ns, Name: name}, &pod); err != nil && k8serrors.IsNotFound(err) {
+		return "", nil
+	} else if err != nil {
+		return "", err
+	}
+	if len(pod.Spec.Containers) == 0 {
+		return "", fmt.Errorf("no containers found in operator pod")
+	}
+	return pod.Spec.Containers[0].Image, nil
+}
+
 func exitOnError(err error, msg string) {
 	if err != nil {
 		log.Error(err, msg)
diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go
index a5af925..99ca1d0 100644
--- a/pkg/controller/build/build_controller.go
+++ b/pkg/controller/build/build_controller.go
@@ -161,9 +161,9 @@ func (r *reconcileBuild) Reconcile(ctx context.Context, request reconcile.Reques
 	switch pl.Status.Build.BuildStrategy {
 	case v1.IntegrationPlatformBuildStrategyPod:
 		actions = []Action{
-			newInitializePodAction(),
+			newInitializePodAction(r.reader),
 			newScheduleAction(r.reader),
-			newMonitorPodAction(),
+			newMonitorPodAction(r.reader),
 			newErrorRecoveryAction(),
 			newErrorAction(),
 		}
diff --git a/pkg/controller/build/build_pod.go b/pkg/controller/build/build_pod.go
index e81cadf..3cea938 100644
--- a/pkg/controller/build/build_pod.go
+++ b/pkg/controller/build/build_pod.go
@@ -34,7 +34,6 @@ import (
 
 	v1 "github.com/apache/camel-k/pkg/apis/camel/v1"
 	"github.com/apache/camel-k/pkg/builder"
-	"github.com/apache/camel-k/pkg/client"
 	"github.com/apache/camel-k/pkg/platform"
 	"github.com/apache/camel-k/pkg/util/defaults"
 	"github.com/apache/camel-k/pkg/util/kubernetes"
@@ -114,7 +113,7 @@ var (
 	}
 )
 
-func newBuildPod(ctx context.Context, c client.Client, build *v1.Build) (*corev1.Pod, error) {
+func newBuildPod(ctx context.Context, c ctrl.Reader, build *v1.Build) (*corev1.Pod, error) {
 	pod := &corev1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: corev1.SchemeGroupVersion.String(),
@@ -138,7 +137,7 @@ func newBuildPod(ctx context.Context, c client.Client, build *v1.Build) (*corev1
 
 	for _, task := range build.Spec.Tasks {
 		if task.Builder != nil {
-			err := addBuildTaskToPod(ctx, c, build, task.Builder.Name, pod)
+			err := addBuildTaskToPod(build, task.Builder.Name, pod)
 			if err != nil {
 				return nil, err
 			}
@@ -153,12 +152,12 @@ func newBuildPod(ctx context.Context, c client.Client, build *v1.Build) (*corev1
 				return nil, err
 			}
 		} else if task.S2i != nil {
-			err := addBuildTaskToPod(ctx, c, build, task.S2i.Name, pod)
+			err := addBuildTaskToPod(build, task.S2i.Name, pod)
 			if err != nil {
 				return nil, err
 			}
 		} else if task.Spectrum != nil {
-			err := addBuildTaskToPod(ctx, c, build, task.Spectrum.Name, pod)
+			err := addBuildTaskToPod(build, task.Spectrum.Name, pod)
 			if err != nil {
 				return nil, err
 			}
@@ -209,7 +208,7 @@ func buildPodName(build *v1.Build) string {
 	return "camel-k-" + build.Name + "-builder"
 }
 
-func addBuildTaskToPod(ctx context.Context, c ctrl.Reader, build *v1.Build, taskName string, pod *corev1.Pod) error {
+func addBuildTaskToPod(build *v1.Build, taskName string, pod *corev1.Pod) error {
 	if !hasBuilderVolume(pod) {
 		// Add the EmptyDir volume used to share the build state across tasks
 		pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
@@ -220,11 +219,7 @@ func addBuildTaskToPod(ctx context.Context, c ctrl.Reader, build *v1.Build, task
 		})
 	}
 
-	// TODO: Move the retrieval of the operator image into the controller
-	operatorImage, err := platform.GetCurrentOperatorImage(ctx, c)
-	if err != nil {
-		return err
-	}
+	operatorImage := platform.OperatorImage
 	if operatorImage == "" {
 		operatorImage = defaults.ImageName + ":" + defaults.Version
 	}
@@ -250,7 +245,7 @@ func addBuildTaskToPod(ctx context.Context, c ctrl.Reader, build *v1.Build, task
 	return nil
 }
 
-func addBuildahTaskToPod(ctx context.Context, c client.Client, build *v1.Build, task *v1.BuildahTask, pod *corev1.Pod) error {
+func addBuildahTaskToPod(ctx context.Context, c ctrl.Reader, build *v1.Build, task *v1.BuildahTask, pod *corev1.Pod) error {
 	bud := []string{
 		"buildah",
 		"bud",
diff --git a/pkg/controller/build/initialize_pod.go b/pkg/controller/build/initialize_pod.go
index bb0fb1b..4fd8cb8 100644
--- a/pkg/controller/build/initialize_pod.go
+++ b/pkg/controller/build/initialize_pod.go
@@ -20,17 +20,22 @@ package build
 import (
 	"context"
 
+	ctrl "sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/pkg/errors"
 
 	v1 "github.com/apache/camel-k/pkg/apis/camel/v1"
 )
 
-func newInitializePodAction() Action {
-	return &initializePodAction{}
+func newInitializePodAction(reader ctrl.Reader) Action {
+	return &initializePodAction{
+		reader: reader,
+	}
 }
 
 type initializePodAction struct {
 	baseAction
+	reader ctrl.Reader
 }
 
 // Name returns a common name of the action
@@ -49,7 +54,7 @@ func (action *initializePodAction) Handle(ctx context.Context, build *v1.Build)
 		return nil, errors.Wrap(err, "cannot delete build pod")
 	}
 
-	pod, err := getBuilderPod(ctx, action.client, build)
+	pod, err := getBuilderPod(ctx, action.reader, build)
 	if err != nil || pod != nil {
 		// We return and wait for the pod to be deleted before de-queue the build pod.
 		return nil, err
diff --git a/pkg/controller/build/monitor_pod.go b/pkg/controller/build/monitor_pod.go
index 3c3bf13..5ec3b61 100644
--- a/pkg/controller/build/monitor_pod.go
+++ b/pkg/controller/build/monitor_pod.go
@@ -39,12 +39,15 @@ import (
 
 const timeoutAnnotation = "camel.apache.org/timeout"
 
-func newMonitorPodAction() Action {
-	return &monitorPodAction{}
+func newMonitorPodAction(reader ctrl.Reader) Action {
+	return &monitorPodAction{
+		reader: reader,
+	}
 }
 
 type monitorPodAction struct {
 	baseAction
+	reader ctrl.Reader
 }
 
 // Name returns a common name of the action
@@ -59,7 +62,7 @@ func (action *monitorPodAction) CanHandle(build *v1.Build) bool {
 
 // Handle handles the builds
 func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v1.Build, error) {
-	pod, err := getBuilderPod(ctx, action.client, build)
+	pod, err := getBuilderPod(ctx, action.reader, build)
 	if err != nil {
 		return nil, err
 	}
@@ -68,7 +71,7 @@ func (action *monitorPodAction) Handle(ctx context.Context, build *v1.Build) (*v
 
 	switch build.Status.Phase {
 	case v1.BuildPhasePending:
-		if pod, err = newBuildPod(ctx, action.client, build); err != nil {
+		if pod, err = newBuildPod(ctx, action.reader, build); err != nil {
 			return nil, err
 		}
 		// Set the Build as the Pod owner and controller
diff --git a/pkg/platform/operator.go b/pkg/platform/operator.go
index 7492158..8842ab6 100644
--- a/pkg/platform/operator.go
+++ b/pkg/platform/operator.go
@@ -19,12 +19,10 @@ package platform
 
 import (
 	"context"
-	"errors"
 	"os"
 	"strings"
 
 	coordination "k8s.io/api/coordination/v1"
-	v1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 
 	ctrl "sigs.k8s.io/controller-runtime/pkg/client"
@@ -36,25 +34,7 @@ const operatorPodNameEnvVariable = "POD_NAME"
 
 const OperatorLockName = "camel-k-lock"
 
-// GetCurrentOperatorImage returns the image currently used by the running operator if present (when running out of cluster, it may be absent).
-func GetCurrentOperatorImage(ctx context.Context, c ctrl.Reader) (string, error) {
-	ns := GetOperatorNamespace()
-	name := GetOperatorPodName()
-	if ns == "" || name == "" {
-		return "", nil
-	}
-
-	pod := v1.Pod{}
-	if err := c.Get(ctx, ctrl.ObjectKey{Namespace: ns, Name: name}, &pod); err != nil && k8serrors.IsNotFound(err) {
-		return "", nil
-	} else if err != nil {
-		return "", err
-	}
-	if len(pod.Spec.Containers) == 0 {
-		return "", errors.New("no containers found in operator pod")
-	}
-	return pod.Spec.Containers[0].Image, nil
-}
+var OperatorImage string
 
 // IsCurrentOperatorGlobal returns true if the operator is configured to watch all namespaces
 func IsCurrentOperatorGlobal() bool {
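
The patch passes a ctrl.Reader into the build Pod actions so the Pod is created and retrieved through a reader that is expected to bypass the informer cache, and it resolves the operator image once at startup into the new platform.OperatorImage variable. Below is a minimal sketch of how such a non-cached reader can be wired from the controller-runtime manager; the reconciler type, field names, and helper function are illustrative assumptions rather than code from this repository, while GetClient, GetAPIReader, Reader.Get, and ObjectKey are actual controller-runtime API.

// Sketch under the assumption that the reconciler's "reader" field is a
// non-cached API reader; the names below are illustrative, not from the commit.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

type buildReconciler struct {
	client ctrl.Client // cache-backed client: fine for writes and watched reads
	reader ctrl.Reader // direct API server reads, bypassing the informer cache
}

func newBuildReconciler(mgr manager.Manager) *buildReconciler {
	return &buildReconciler{
		client: mgr.GetClient(),    // served from the shared informer cache
		reader: mgr.GetAPIReader(), // plain API reader, never served from cache
	}
}

// lookUpBuilderPod reads the build Pod straight from the API server, so a Pod
// created a moment earlier is observed even if the cache has not synced yet.
func (r *buildReconciler) lookUpBuilderPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
	pod := corev1.Pod{}
	if err := r.reader.Get(ctx, ctrl.ObjectKey{Namespace: namespace, Name: name}, &pod); err != nil {
		return nil, err
	}
	return &pod, nil
}

The trade-off is one extra round trip to the API server per lookup, which is acceptable for low-frequency reads such as checking on a builder Pod, and avoids acting on a stale or missing cache entry right after the Pod is created.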