From 5fd1e16f1ac6a06ceb1429f05e0f6ceedfb4aefe Mon Sep 17 00:00:00 2001 From: Patrice Ferlet Date: Tue, 24 May 2022 14:44:31 +0200 Subject: [PATCH] WIP, possibility to create cronjob --- generator/crontabs.go | 84 ++++++++++++++++++++++++++++++++++++++++++ generator/main.go | 9 ++++- helm/cronTab.go | 60 ++++++++++++++++++++++++++++++ helm/deployment.go | 1 + helm/labels.go | 3 ++ helm/role.go | 33 +++++++++++++++++ helm/roleBinding.go | 44 ++++++++++++++++++++++ helm/serviceAccount.go | 18 +++++++++ 8 files changed, 251 insertions(+), 1 deletion(-) create mode 100644 generator/crontabs.go create mode 100644 helm/cronTab.go create mode 100644 helm/role.go create mode 100644 helm/roleBinding.go create mode 100644 helm/serviceAccount.go diff --git a/generator/crontabs.go b/generator/crontabs.go new file mode 100644 index 0000000..adf7006 --- /dev/null +++ b/generator/crontabs.go @@ -0,0 +1,84 @@ +package generator + +import ( + "fmt" + "katenary/helm" + "log" + + "github.com/compose-spec/compose-go/types" + "gopkg.in/yaml.v3" +) + +const ( + cronMulti = `pods=$(kubectl get pods --selector=%s/component=%s,%s/resource=deployment -o jsonpath='{.items[*].metadata.name}')` + cronMultiCmd = ` +for pod in $pods; do + kubectl exec -i $pod -c %s -- sh -c '%s' +done` + cronSingle = `pod=$(kubectl get pods --selector=%s/component=%s,%s/resource=deployment -o jsonpath='{.items[0].metadata.name}')` + cronCmd = ` +kubectl exec -i $pod -c %s -- sh -c '%s'` +) + +type CronDef struct { + Command string `yaml:"command"` + Schedule string `yaml:"schedule"` + Multi bool `yaml:"allPods,omitempty"` +} + +func buildCrontab(deployName string, deployment *helm.Deployment, s *types.ServiceConfig, fileGeneratorChan HelmFileGenerator) { + // get the cron label from the service + var crondef string + var ok bool + if crondef, ok = s.Labels[helm.LABEL_CRON]; !ok { + return + } + + // parse yaml + crons := []CronDef{} + err := yaml.Unmarshal([]byte(crondef), &crons) + if err != nil { + 
log.Fatalf("error: %v", err) + } + log.Println(crons) + + // create a serviceAccount + sa := helm.NewServiceAccount(deployName) + // create a role + role := helm.NewCronRole(deployName) + + // create a roleBinding + roleBinding := helm.NewRoleBinding(deployName, sa, role) + + // make generation + fileGeneratorChan <- sa + fileGeneratorChan <- role + fileGeneratorChan <- roleBinding + + // create crontabs + for _, cron := range crons { + var cmd, podget string + if cron.Multi { + podget = cronMulti + cmd = cronMultiCmd + } else { + podget = cronSingle + cmd = cronCmd + } + podget = fmt.Sprintf(podget, helm.K, deployName, helm.K) + cmd = fmt.Sprintf(cmd, s.Name, cron.Command) + cmd = podget + cmd + + cronTab := helm.NewCrontab( + deployName, + "bitnami/kubectl", + cmd, + cron.Schedule, + sa, + ) + // add crontab + fileGeneratorChan <- cronTab + } + + return +} diff --git a/generator/main.go b/generator/main.go index b2fe43f..542894a 100644 --- a/generator/main.go +++ b/generator/main.go @@ -788,7 +788,14 @@ func setSecretVar(name string, s *types.ServiceConfig, c *helm.Container) *helm. // Generate a container in deployment with all needed objects (volumes, secrets, env, ...). // The deployName shoud be the name of the deployment, we cannot get it from Metadata as this is a variable name. 
-func newContainerForDeployment(deployName, containerName string, deployment *helm.Deployment, s *types.ServiceConfig, fileGeneratorChan HelmFileGenerator) *helm.Container { +func newContainerForDeployment( + deployName, containerName string, + deployment *helm.Deployment, + s *types.ServiceConfig, + fileGeneratorChan HelmFileGenerator) *helm.Container { + + buildCrontab(deployName, deployment, s, fileGeneratorChan) + container := helm.NewContainer(containerName, s.Image, s.Environment, s.Labels) applyEnvMapLabel(s, container) diff --git a/helm/cronTab.go b/helm/cronTab.go new file mode 100644 index 0000000..5ac8d3d --- /dev/null +++ b/helm/cronTab.go @@ -0,0 +1,60 @@ +package helm + +type Job struct { + ServiceAccount string `yaml:"serviceAccount,omitempty"` + ServiceAccountName string `yaml:"serviceAccountName,omitempty"` + Containers []Container `yaml:"containers"` + RestartPolicy string `yaml:"restartPolicy,omitempty"` +} +type JobSpec struct { + Template Job `yaml:"template"` +} + +type JobTemplate struct { + Metadata Metadata `yaml:"metadata"` + Spec JobSpec `yaml:"spec"` + Schedule string `yaml:"schedule"` +} + +type CronTab struct { + *K8sBase `yaml:",inline"` + JobTemplate JobTemplate `yaml:"jobTemplate"` +} + +func NewCrontab(name, image, command, schedule string, serviceAccount *ServiceAccount) *CronTab { + cron := &CronTab{ + K8sBase: NewBase(), + } + cron.K8sBase.ApiVersion = "batch/v1" + cron.K8sBase.Kind = "CronJob" + + //cmd, err := shlex.Split(command) + //if err != nil { + // panic(err) + //} + + cron.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + cron.K8sBase.Metadata.Labels[K+"/component"] = name + cron.JobTemplate = JobTemplate{ + Schedule: schedule, + Metadata: Metadata{ + Labels: cron.K8sBase.Metadata.Labels, + }, + Spec: JobSpec{ + Template: Job{ + ServiceAccount: serviceAccount.Name(), + ServiceAccountName: serviceAccount.Name(), + Containers: []Container{ + { + Name: name, + Image: image, + Command: []string{command}, + }, + }, + 
RestartPolicy: "OnFailure", + }, + }, + } + + return cron +} diff --git a/helm/deployment.go b/helm/deployment.go index d1dec8a..3943423 100644 --- a/helm/deployment.go +++ b/helm/deployment.go @@ -12,6 +12,7 @@ func NewDeployment(name string) *Deployment { d.K8sBase.ApiVersion = "apps/v1" d.K8sBase.Kind = "Deployment" d.K8sBase.Metadata.Labels[K+"/component"] = name + d.K8sBase.Metadata.Labels[K+"/resource"] = "deployment" return d } diff --git a/helm/labels.go b/helm/labels.go index d9fc397..844d070 100644 --- a/helm/labels.go +++ b/helm/labels.go @@ -18,6 +18,7 @@ const ( LABEL_EMPTYDIRS = K + "/empty-dirs" LABEL_IGNORE = K + "/ignore" LABEL_SECRETVARS = K + "/secret-vars" + LABEL_CRON = K + "/crontabs" //deprecated: use LABEL_MAP_ENV instead LABEL_ENV_SERVICE = K + "/env-to-service" @@ -37,6 +38,7 @@ func GetLabelsDocumentation() string { {{.LABEL_SAMEPOD | printf "%-33s"}}: specifies that the pod should be deployed in the same pod than the given service name {{.LABEL_VOLUMEFROM | printf "%-33s"}}: specifies that the volumes to be mounted from the given service (yaml style) {{.LABEL_EMPTYDIRS | printf "%-33s"}}: specifies that the given volume names should be "emptyDir" instead of persistentVolumeClaim (coma separated) +{{.LABEL_CRON | printf "%-33s"}}: specifies that the given cronjobs should be deployed (yaml style, array) {{.LABEL_HEALTHCHECK | printf "%-33s"}}: specifies that the container should be monitored by a healthcheck, **it overrides the docker-compose healthcheck**. 
{{ printf "%-34s" ""}} You can use these form of label values: {{ printf "%-35s" ""}}- "http://[not used address][:port][/path]" to specify an http healthcheck @@ -56,6 +58,7 @@ func GetLabelsDocumentation() string { "LABEL_IGNORE": LABEL_IGNORE, "LABEL_MAP_ENV": LABEL_MAP_ENV, "LABEL_SECRETVARS": LABEL_SECRETVARS, + "LABEL_CRON": LABEL_CRON, }) return buff.String() } diff --git a/helm/role.go b/helm/role.go new file mode 100644 index 0000000..152d1d0 --- /dev/null +++ b/helm/role.go @@ -0,0 +1,33 @@ +package helm + +type Rule struct { + ApiGroup []string `yaml:"apiGroups,omitempty"` + Resources []string `yaml:"resources,omitempty"` + Verbs []string `yaml:"verbs,omitempty"` +} + +type Role struct { + *K8sBase `yaml:",inline"` + Rules []Rule `yaml:"rules,omitempty"` +} + +func NewCronRole(name string) *Role { + role := &Role{ + K8sBase: NewBase(), + } + + role.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + "-cron-executor" + role.K8sBase.Kind = "Role" + role.K8sBase.ApiVersion = "rbac.authorization.k8s.io/v1" + role.K8sBase.Metadata.Labels[K+"/component"] = name + + role.Rules = []Rule{ + { + ApiGroup: []string{""}, + Resources: []string{"pods", "pods/exec"}, + Verbs: []string{"get", "list", "create"}, + }, + } + + return role +} diff --git a/helm/roleBinding.go b/helm/roleBinding.go new file mode 100644 index 0000000..a99d8ef --- /dev/null +++ b/helm/roleBinding.go @@ -0,0 +1,44 @@ +package helm + +type RoleRef struct { + Kind string `yaml:"kind"` + Name string `yaml:"name"` + APIGroup string `yaml:"apiGroup"` +} + +type Subject struct { + Kind string `yaml:"kind"` + Name string `yaml:"name"` + Namespace string `yaml:"namespace"` +} + +type RoleBinding struct { + *K8sBase `yaml:",inline"` + RoleRef RoleRef `yaml:"roleRef,omitempty"` + Subjects []Subject `yaml:"subjects,omitempty"` +} + +func NewRoleBinding(name string, user *ServiceAccount, role *Role) *RoleBinding { + rb := &RoleBinding{ + K8sBase: NewBase(), + } + + rb.K8sBase.Kind = "RoleBinding" + 
rb.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + "-cron-allow" + rb.K8sBase.ApiVersion = "rbac.authorization.k8s.io/v1" + rb.K8sBase.Metadata.Labels[K+"/component"] = name + + rb.RoleRef.Kind = "Role" + rb.RoleRef.Name = role.Metadata.Name + rb.RoleRef.APIGroup = "rbac.authorization.k8s.io" + + rb.Subjects = []Subject{ + { + Kind: "ServiceAccount", + Name: user.Metadata.Name, + Namespace: "{{ .Release.Namespace }}", + }, + } + + return rb +} diff --git a/helm/serviceAccount.go b/helm/serviceAccount.go new file mode 100644 index 0000000..e7b44c5 --- /dev/null +++ b/helm/serviceAccount.go @@ -0,0 +1,18 @@ +package helm + +// ServiceAccount defines a service account +type ServiceAccount struct { + *K8sBase `yaml:",inline"` +} + +// NewServiceAccount creates a new service account with a given name. +func NewServiceAccount(name string) *ServiceAccount { + sa := &ServiceAccount{ + K8sBase: NewBase(), + } + sa.K8sBase.Kind = "ServiceAccount" + sa.K8sBase.ApiVersion = "v1" + sa.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + "-cron-user" + sa.K8sBase.Metadata.Labels[K+"/component"] = name + return sa +}