Feat cronjob #23
84
generator/crontabs.go
Normal file
84
generator/crontabs.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package generator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"katenary/helm"
|
||||
"log"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
	// cronMulti is a shell snippet that resolves every pod of the target
	// deployment via the katenary component/resource selector labels.
	// Placeholders: label prefix, component name, label prefix.
	cronMulti = `pods=$(kubectl get pods --selector=%s/component=%s,%s/resource=deployment -o jsonpath='{.items[*].metadata.name}')`
	// cronMultiCmd executes the command in the named container of each
	// resolved pod. Placeholders: container name, shell command.
	cronMultiCmd = `
for pod in $pods; do
kubectl exec -i $pod -c %s -- sh -c '%s'
done`
	// cronSingle resolves only the first matching pod of the deployment.
	// Placeholders: label prefix, component name, label prefix.
	cronSingle = `pod=$(kubectl get pods --selector=%s/component=%s,%s/resource=deployment -o jsonpath='{.items[0].metadata.name}')`
	// cronCmd executes the command in the named container of that single
	// pod. Placeholders: container name, shell command.
	cronCmd = `
kubectl exec -i $pod -c %s -- sh -c '%s'`
)
|
||||
|
||||
// CronDef describes one cron job parsed from the service's katenary
// crontabs label (a YAML list of these entries).
type CronDef struct {
	// Command is the shell command to run inside the container.
	Command string `yaml:"command"`
	// Schedule is the cron schedule expression (crontab syntax).
	Schedule string `yaml:"schedule"`
	// Multi, when true ("allPods"), runs the command on every pod of the
	// deployment instead of only the first one.
	Multi bool `yaml:"allPods,omitempty"`
}
|
||||
|
||||
func buildCrontab(deployName string, deployment *helm.Deployment, s *types.ServiceConfig, fileGeneratorChan HelmFileGenerator) {
|
||||
// get the cron label from the service
|
||||
var crondef string
|
||||
var ok bool
|
||||
if crondef, ok = s.Labels[helm.LABEL_CRON]; !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// parse yaml
|
||||
crons := []CronDef{}
|
||||
err := yaml.Unmarshal([]byte(crondef), &crons)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
log.Println(crons)
|
||||
|
||||
// create a serviceAccount
|
||||
sa := helm.NewServiceAccount(deployName)
|
||||
// create a role
|
||||
role := helm.NewCronRole(deployName)
|
||||
|
||||
// create a roleBinding
|
||||
roleBinding := helm.NewRoleBinding(deployName, sa, role)
|
||||
|
||||
// make generation
|
||||
fileGeneratorChan <- sa
|
||||
fileGeneratorChan <- role
|
||||
fileGeneratorChan <- roleBinding
|
||||
|
||||
// create crontabs
|
||||
for _, cron := range crons {
|
||||
var cmd, podget string
|
||||
if cron.Multi {
|
||||
podget = cronMulti
|
||||
cmd = cronMultiCmd
|
||||
} else {
|
||||
podget = cronSingle
|
||||
cmd = cronCmd
|
||||
}
|
||||
podget = fmt.Sprintf(podget, helm.K, deployName, helm.K)
|
||||
cmd = fmt.Sprintf(cmd, s.Name, cron.Command)
|
||||
cmd = podget + cmd
|
||||
|
||||
cronTab := helm.NewCrontab(
|
||||
deployName,
|
||||
"bitnami/kubectl",
|
||||
cmd,
|
||||
cron.Schedule,
|
||||
sa,
|
||||
)
|
||||
// add crontab
|
||||
fileGeneratorChan <- cronTab
|
||||
}
|
||||
|
||||
return
|
||||
}
|
@@ -788,7 +788,14 @@ func setSecretVar(name string, s *types.ServiceConfig, c *helm.Container) *helm.
|
||||
|
||||
// Generate a container in deployment with all needed objects (volumes, secrets, env, ...).
|
||||
// The deployName should be the name of the deployment, we cannot get it from Metadata as this is a variable name.
|
||||
func newContainerForDeployment(deployName, containerName string, deployment *helm.Deployment, s *types.ServiceConfig, fileGeneratorChan HelmFileGenerator) *helm.Container {
|
||||
func newContainerForDeployment(
|
||||
deployName, containerName string,
|
||||
deployment *helm.Deployment,
|
||||
s *types.ServiceConfig,
|
||||
fileGeneratorChan HelmFileGenerator) *helm.Container {
|
||||
|
||||
buildCrontab(deployName, deployment, s, fileGeneratorChan)
|
||||
|
||||
container := helm.NewContainer(containerName, s.Image, s.Environment, s.Labels)
|
||||
|
||||
applyEnvMapLabel(s, container)
|
||||
|
60
helm/cronTab.go
Normal file
60
helm/cronTab.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package helm
|
||||
|
||||
// Job is the pod-level part of the generated CronJob template: which
// service account runs it, the containers to execute, and the restart
// policy.
//
// NOTE(review): in the Kubernetes API, containers sit under
// jobTemplate.spec.template.spec (a PodTemplateSpec), not directly under
// template — confirm the emitted manifest is accepted by the API server.
type Job struct {
	ServiceAccount string `yaml:"serviceAccount,omitempty"`
	ServiceAccountName string `yaml:"serviceAccountName,omitempty"`
	Containers []Container `yaml:"containers"`
	RestartPolicy string `yaml:"restartPolicy,omitempty"`
}
|
||||
// JobSpec wraps the job's pod template; serialized as "spec.template" in
// the emitted YAML.
type JobSpec struct {
	Template Job `yaml:"template"`
}
|
||||
|
||||
// JobTemplate is the "jobTemplate" node of the generated CronJob.
//
// NOTE(review): the Kubernetes CronJob schema places "schedule" under the
// CronJob "spec", next to "jobTemplate" — here it is serialized inside
// jobTemplate itself; verify the generated chart against a real cluster.
type JobTemplate struct {
	Metadata Metadata `yaml:"metadata"`
	Spec JobSpec `yaml:"spec"`
	Schedule string `yaml:"schedule"`
}
|
||||
|
||||
// CronTab is a generated CronJob manifest (apiVersion batch/v1) built from
// one katenary cron definition.
//
// NOTE(review): jobTemplate is emitted at the top level of the object; the
// Kubernetes API expects it under "spec" — confirm before relying on it.
type CronTab struct {
	*K8sBase `yaml:",inline"`
	JobTemplate JobTemplate `yaml:"jobTemplate"`
}
|
||||
|
||||
func NewCrontab(name, image, command, schedule string, serviceAccount *ServiceAccount) *CronTab {
|
||||
cron := &CronTab{
|
||||
K8sBase: NewBase(),
|
||||
}
|
||||
cron.K8sBase.ApiVersion = "batch/v1"
|
||||
cron.K8sBase.Kind = "CronJob"
|
||||
|
||||
//cmd, err := shlex.Split(command)
|
||||
//if err != nil {
|
||||
// panic(err)
|
||||
//}
|
||||
|
||||
cron.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name
|
||||
cron.K8sBase.Metadata.Labels[K+"/component"] = name
|
||||
cron.JobTemplate = JobTemplate{
|
||||
Schedule: schedule,
|
||||
Metadata: Metadata{
|
||||
Labels: cron.K8sBase.Metadata.Labels,
|
||||
},
|
||||
Spec: JobSpec{
|
||||
Template: Job{
|
||||
ServiceAccount: serviceAccount.Name(),
|
||||
ServiceAccountName: serviceAccount.Name(),
|
||||
Containers: []Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Command: []string{command},
|
||||
},
|
||||
},
|
||||
RestartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return cron
|
||||
}
|
@@ -12,6 +12,7 @@ func NewDeployment(name string) *Deployment {
|
||||
d.K8sBase.ApiVersion = "apps/v1"
|
||||
d.K8sBase.Kind = "Deployment"
|
||||
d.K8sBase.Metadata.Labels[K+"/component"] = name
|
||||
d.K8sBase.Metadata.Labels[K+"/resource"] = "deployment"
|
||||
return d
|
||||
}
|
||||
|
||||
|
@@ -18,6 +18,7 @@ const (
|
||||
LABEL_EMPTYDIRS = K + "/empty-dirs"
|
||||
LABEL_IGNORE = K + "/ignore"
|
||||
LABEL_SECRETVARS = K + "/secret-vars"
|
||||
LABEL_CRON = K + "/crontabs"
|
||||
|
||||
//deprecated: use LABEL_MAP_ENV instead
|
||||
LABEL_ENV_SERVICE = K + "/env-to-service"
|
||||
@@ -37,6 +38,7 @@ func GetLabelsDocumentation() string {
|
||||
{{.LABEL_SAMEPOD | printf "%-33s"}}: specifies that the pod should be deployed in the same pod than the given service name
|
||||
{{.LABEL_VOLUMEFROM | printf "%-33s"}}: specifies that the volumes to be mounted from the given service (yaml style)
|
||||
{{.LABEL_EMPTYDIRS | printf "%-33s"}}: specifies that the given volume names should be "emptyDir" instead of persistentVolumeClaim (coma separated)
|
||||
{{.LABEL_CRON | printf "%-33s"}}: specifies that the given cronjobs should be deployed (yaml style, array)
|
||||
{{.LABEL_HEALTHCHECK | printf "%-33s"}}: specifies that the container should be monitored by a healthcheck, **it overrides the docker-compose healthcheck**.
|
||||
{{ printf "%-34s" ""}} You can use these form of label values:
|
||||
{{ printf "%-35s" ""}}- "http://[not used address][:port][/path]" to specify an http healthcheck
|
||||
@@ -56,6 +58,7 @@ func GetLabelsDocumentation() string {
|
||||
"LABEL_IGNORE": LABEL_IGNORE,
|
||||
"LABEL_MAP_ENV": LABEL_MAP_ENV,
|
||||
"LABEL_SECRETVARS": LABEL_SECRETVARS,
|
||||
"LABEL_CRON": LABEL_CRON,
|
||||
})
|
||||
return buff.String()
|
||||
}
|
||||
|
33
helm/role.go
Normal file
33
helm/role.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package helm
|
||||
|
||||
type Rule struct {
|
||||
ApiGroup []string `yaml:"apiGroup,omitempty"`
|
||||
Resources []string `yaml:"resource,omitempty"`
|
||||
Verbs []string `yaml:"verbs,omitempty"`
|
||||
}
|
||||
|
||||
// Role is an RBAC Role manifest: the shared base metadata plus the list of
// policy rules it grants.
type Role struct {
	*K8sBase `yaml:",inline"`
	Rules []Rule `yaml:"rules,omitempty"`
}
|
||||
|
||||
func NewCronRole(name string) *Role {
|
||||
role := &Role{
|
||||
K8sBase: NewBase(),
|
||||
}
|
||||
|
||||
role.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + "-cron-executor"
|
||||
role.K8sBase.Kind = "Role"
|
||||
role.K8sBase.ApiVersion = "rbac.authorization.k8s.io/v1"
|
||||
role.K8sBase.Metadata.Labels[K+"/component"] = name
|
||||
|
||||
role.Rules = []Rule{
|
||||
{
|
||||
ApiGroup: []string{""},
|
||||
Resources: []string{"pods", "pods/exec"},
|
||||
Verbs: []string{"get", "list", "create"},
|
||||
},
|
||||
}
|
||||
|
||||
return role
|
||||
}
|
44
helm/roleBinding.go
Normal file
44
helm/roleBinding.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package helm
|
||||
|
||||
// RoleRef points a RoleBinding at the Role whose permissions it grants.
type RoleRef struct {
	Kind string `yaml:"kind"`
	Name string `yaml:"name"`
	APIGroup string `yaml:"apiGroup"`
}
|
||||
|
||||
// Subject identifies who a RoleBinding grants the role to — here a
// ServiceAccount inside the release namespace.
type Subject struct {
	Kind string `yaml:"kind"`
	Name string `yaml:"name"`
	Namespace string `yaml:"namespace"`
}
|
||||
|
||||
// RoleBinding is an RBAC RoleBinding manifest linking subjects (service
// accounts) to a Role.
type RoleBinding struct {
	*K8sBase `yaml:",inline"`
	RoleRef RoleRef `yaml:"roleRef,omitempty"`
	Subjects []Subject `yaml:"subjects,omitempty"`
}
|
||||
|
||||
func NewRoleBinding(name string, user *ServiceAccount, role *Role) *RoleBinding {
|
||||
rb := &RoleBinding{
|
||||
K8sBase: NewBase(),
|
||||
}
|
||||
|
||||
rb.K8sBase.Kind = "RoleBinding"
|
||||
rb.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + "-cron-allow"
|
||||
rb.K8sBase.ApiVersion = "rbac.authorization.k8s.io/v1"
|
||||
rb.K8sBase.Metadata.Labels[K+"/component"] = name
|
||||
|
||||
rb.RoleRef.Kind = "Role"
|
||||
rb.RoleRef.Name = role.Metadata.Name
|
||||
rb.RoleRef.APIGroup = "rbac.authorization.k8s.io"
|
||||
|
||||
rb.Subjects = []Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: user.Metadata.Name,
|
||||
Namespace: "{{ .Release.Namespace }}",
|
||||
},
|
||||
}
|
||||
|
||||
return rb
|
||||
}
|
18
helm/serviceAccount.go
Normal file
18
helm/serviceAccount.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package helm
|
||||
|
||||
// ServiceAccount defines a service account manifest; it carries only the
// shared base metadata (kind, apiVersion, name, labels).
type ServiceAccount struct {
	*K8sBase `yaml:",inline"`
}
|
||||
|
||||
// NewServiceAccount creates a new service account with a given name.
|
||||
func NewServiceAccount(name string) *ServiceAccount {
|
||||
sa := &ServiceAccount{
|
||||
K8sBase: NewBase(),
|
||||
}
|
||||
sa.K8sBase.Kind = "ServiceAccount"
|
||||
sa.K8sBase.ApiVersion = "v1"
|
||||
sa.K8sBase.Metadata.Name = ReleaseNameTpl + "-" + name + "-cron-user"
|
||||
sa.K8sBase.Metadata.Labels[K+"/component"] = name
|
||||
return sa
|
||||
}
|
Reference in New Issue
Block a user