7 Commits

Author SHA1 Message Date
0e133ae6db fix(path): fixing the default compose file check
All checks were successful
Go-Tests / tests (pull_request) Successful in 1m54s
Go-Tests / sonar (pull_request) Successful in 1m15s
2026-03-15 21:50:26 +01:00
5d839035b9 feat(depends): add suffix on RBAC and SA 2026-03-15 10:15:47 +01:00
7e1bbdc9b3 feat(quality): remove unused modules 2026-03-15 09:43:58 +01:00
f175416ac2 feat(quality): fix duplicates and modernize 2026-03-15 09:43:16 +01:00
613baaf229 feat(depends): add RBAC 2026-03-15 08:55:24 +01:00
8fc9cb31c4 feat(depends): Check call to kubernetes API 2026-03-08 23:50:29 +01:00
78b5af747e feat(depends): Use kubernetes API for depends_on management
We were using netcat to port to check if a service is up, but actually
we can do like Docker / Podman compose and check the status. For now,
I'm using the endpoint status, but maybe we can just check if the object
is "up".
2026-03-08 23:47:13 +01:00
9 changed files with 529 additions and 32 deletions

View File

@@ -97,7 +97,8 @@ Katenary transforms compose services this way:
- environment variables will be stored inside a `ConfigMap`
- image, tags, and ingresses configuration are also stored in `values.yaml` file
- if named volumes are declared, Katenary create `PersistentVolumeClaims` - not enabled in values file
- `depends_on` needs that the pointed service declared a port. If not, you can use labels to inform Katenary
- `depends_on` uses Kubernetes API by default to check if the service endpoint is ready. No port required.
Use label `katenary.v3/depends-on: legacy` to use the old netcat method (requires port).
For any other specific configuration, like binding local files as `ConfigMap`, bind variables, add values with
documentation, etc. You'll need to use labels.
@@ -147,10 +148,8 @@ Katenary proposes a lot of labels to configure the helm chart generation, but so
### Work with Depends On?
Kubernetes does not provide service or pod startup detection from other pods. But Katenary will create an `initContainer`
so that you can wait for a service to respond. You'll probably need to adapt the compose file a bit.
See this compose file:
Katenary creates an `initContainer` to wait for dependent services to be ready. By default, it uses the Kubernetes API
to check if the service endpoint has ready addresses - no port required.
```yaml
version: "3"
@@ -167,9 +166,7 @@ services:
MYSQL_ROOT_PASSWORD: foobar
```
In this case, `webapp` needs to know the `database` port because the `depends_on` points to it, and Kubernetes has no
solution (yet) to detect the database startup. Katenary wants to create an `initContainer` to hit the related service.
So, instead of exposing the port in the compose definition, let's declare this to Katenary with labels:
If you need the old netcat-based method (requires port), add the `katenary.v3/depends-on: legacy` label to the dependent service:
```yaml
version: "3"
@@ -179,14 +176,15 @@ services:
image: php:8-apache
depends_on:
- database
labels:
katenary.v3/depends-on: legacy
database:
image: mariadb
environment:
MYSQL_ROOT_PASSWORD: foobar
labels:
katenary.v3/ports: |-
- 3306
ports:
- 3306:3306
```
### Declare ingresses

View File

@@ -109,8 +109,19 @@ func Convert(config ConvertOptions, dockerComposeFile ...string) error {
// the current working directory is the directory
currentDir, _ := os.Getwd()
// Filter to only existing files before chdir
var existingFiles []string
for _, f := range dockerComposeFile {
if _, err := os.Stat(f); err == nil {
existingFiles = append(existingFiles, f)
}
}
if len(existingFiles) == 0 && len(dockerComposeFile) > 0 {
return fmt.Errorf("no compose file found: %v", dockerComposeFile)
}
// go to the root of the project
if err := os.Chdir(filepath.Dir(dockerComposeFile[0])); err != nil {
if err := os.Chdir(filepath.Dir(existingFiles[0])); err != nil {
logger.Failure(err.Error())
return err
}
@@ -122,12 +133,12 @@ func Convert(config ConvertOptions, dockerComposeFile ...string) error {
}()
// remove the directory part of the docker-compose files
for i, f := range dockerComposeFile {
dockerComposeFile[i] = filepath.Base(f)
for i, f := range existingFiles {
existingFiles[i] = filepath.Base(f)
}
// parse the compose files
project, err := parser.Parse(config.Profiles, config.EnvFiles, dockerComposeFile...)
project, err := parser.Parse(config.Profiles, config.EnvFiles, existingFiles...)
if err != nil {
logger.Failure("Cannot parse compose files", err.Error())
return err

View File

@@ -33,15 +33,16 @@ type ConfigMapMount struct {
// Deployment is a kubernetes Deployment.
type Deployment struct {
*appsv1.Deployment `yaml:",inline"`
chart *HelmChart `yaml:"-"`
configMaps map[string]*ConfigMapMount `yaml:"-"`
volumeMap map[string]string `yaml:"-"` // keep map of fixed named to original volume name
service *types.ServiceConfig `yaml:"-"`
defaultTag string `yaml:"-"`
isMainApp bool `yaml:"-"`
exchangesVolumes map[string]*labelstructs.ExchangeVolume `yaml:"-"`
boundEnvVar []string `yaml:"-"` // environement to remove
*appsv1.Deployment `yaml:",inline"`
chart *HelmChart `yaml:"-"`
configMaps map[string]*ConfigMapMount `yaml:"-"`
volumeMap map[string]string `yaml:"-"` // keep map of fixed named to original volume name
service *types.ServiceConfig `yaml:"-"`
defaultTag string `yaml:"-"`
isMainApp bool `yaml:"-"`
exchangesVolumes map[string]*labelstructs.ExchangeVolume `yaml:"-"`
boundEnvVar              []string                              `yaml:"-"` // environment variables to remove
needsServiceAccount bool `yaml:"-"`
}
// NewDeployment creates a new Deployment from a compose service. The appName is the name of the application taken from the project name.
@@ -262,9 +263,22 @@ func (d *Deployment) BindFrom(service types.ServiceConfig, binded *Deployment) {
// DependsOn adds an initContainer to the deployment that waits for the given
// service to be up before the main containers start.
//
// By default the init container uses the Kubernetes API to check that the
// target service's endpoint is ready (no port required); this flags the
// deployment as needing a ServiceAccount so RBAC resources are generated.
// When the service carries the depends-on label set to "legacy"
// (case-insensitive), the old netcat-based port check is used instead.
func (d *Deployment) DependsOn(to *Deployment, servicename string) error {
	logger.Info("Adding dependency from ", d.service.Name, " to ", to.service.Name)

	if label, ok := d.service.Labels[labels.LabelDependsOn]; ok && strings.EqualFold(label, "legacy") {
		// legacy method: netcat probing each declared port of the target
		return d.dependsOnLegacy(to, servicename)
	}

	// default method: poll the Kubernetes endpoints API, which requires a
	// dedicated ServiceAccount with read access to endpoints
	d.needsServiceAccount = true
	return d.dependsOnK8sAPI(to)
}
func (d *Deployment) dependsOnLegacy(to *Deployment, servicename string) error {
for _, container := range to.Spec.Template.Spec.Containers {
commands := []string{}
if len(container.Ports) == 0 {
@@ -291,6 +305,39 @@ func (d *Deployment) DependsOn(to *Deployment, servicename string) error {
return nil
}
// dependsOnK8sAPI appends an initContainer that polls the Kubernetes API
// until the target service's Endpoints object matches the readiness grep.
//
// The container runs a busybox shell loop querying
// /api/v1/namespaces/{ns}/endpoints/{service} with the pod's service
// account token, sleeping 2 seconds between attempts. The NAMESPACE env
// var is injected from the pod's own metadata via the downward API;
// the in-cluster defaults for KUBERNETES_SERVICE_HOST/PORT are used as
// fallbacks inside the script.
//
// NOTE(review): busybox's wget applet may not accept a `--cacert=` flag —
// TODO confirm against the busybox build shipped in busybox:latest.
// NOTE(review): the grep for `"ready":.*true` assumes the response JSON
// contains a "ready" field; core/v1 Endpoints expose `addresses` vs.
// `notReadyAddresses` rather than a ready flag — verify against a real
// cluster response (EndpointSlices, not Endpoints, carry `ready` conditions).
func (d *Deployment) dependsOnK8sAPI(to *Deployment) error {
	// Raw shell script; %s is replaced by the target service name below.
	script := `NAMESPACE=${NAMESPACE:-default}
SERVICE=%s
KUBERNETES_SERVICE_HOST=${KUBERNETES_SERVICE_HOST:-kubernetes.default.svc}
KUBERNETES_SERVICE_PORT=${KUBERNETES_SERVICE_PORT:-443}
until wget -q -O- --header="Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
--cacert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \
"https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${NAMESPACE}/endpoints/${SERVICE}" \
| grep -q '"ready":.*true'; do
sleep 2
done`
	command := []string{"/bin/sh", "-c", fmt.Sprintf(script, to.Name)}
	d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, corev1.Container{
		Name:    "wait-for-" + to.service.Name,
		Image:   "busybox:latest",
		Command: command,
		Env: []corev1.EnvVar{
			{
				// Inject the pod's namespace via the downward API so the
				// script queries the right endpoints object.
				Name: "NAMESPACE",
				ValueFrom: &corev1.EnvVarSource{
					FieldRef: &corev1.ObjectFieldSelector{
						FieldPath: "metadata.namespace",
					},
				},
			},
		},
	})
	return nil
}
// Filename returns the filename of the deployment.
func (d *Deployment) Filename() string {
return d.service.Name + ".deployment.yaml"
@@ -566,7 +613,7 @@ func (d *Deployment) Yaml() ([]byte, error) {
}
// manage serviceAccount, add condition to use the serviceAccount from values.yaml
if strings.Contains(line, "serviceAccountName:") {
if strings.Contains(line, "serviceAccountName:") && !d.needsServiceAccount {
spaces = strings.Repeat(" ", utils.CountStartingSpaces(line))
pre := spaces + `{{- if ne .Values.` + serviceName + `.serviceAccount "" }}`
post := spaces + "{{- end }}"
@@ -602,6 +649,13 @@ func (d *Deployment) Yaml() ([]byte, error) {
return []byte(strings.Join(content, "\n")), nil
}
// SetServiceAccountName points the pod spec at the generated "dependency"
// ServiceAccount when this deployment needs Kubernetes API access (i.e. it
// has a non-legacy depends_on). Deployments without that need are left
// untouched.
func (d *Deployment) SetServiceAccountName() {
	if d.needsServiceAccount {
		d.Spec.Template.Spec.ServiceAccountName = utils.TplName(d.service.Name, d.chart.Name, "dependency")
	}
}
func (d *Deployment) appendDirectoryToConfigMap(service types.ServiceConfig, appName string, volume types.ServiceVolumeConfig) {
pathnme := utils.PathToName(volume.Source)
if _, ok := d.configMaps[pathnme]; !ok {

View File

@@ -3,6 +3,7 @@ package generator
import (
"fmt"
"os"
"slices"
"strings"
"testing"
@@ -11,6 +12,7 @@ import (
yaml3 "gopkg.in/yaml.v3"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"sigs.k8s.io/yaml"
)
@@ -142,6 +144,86 @@ services:
if len(dt.Spec.Template.Spec.InitContainers) != 1 {
t.Errorf("Expected 1 init container, got %d", len(dt.Spec.Template.Spec.InitContainers))
}
initContainer := dt.Spec.Template.Spec.InitContainers[0]
if !strings.Contains(initContainer.Image, "busybox") {
t.Errorf("Expected busybox image, got %s", initContainer.Image)
}
fullCommand := strings.Join(initContainer.Command, " ")
if !strings.Contains(fullCommand, "wget") {
t.Errorf("Expected wget command (K8s API method), got %s", fullCommand)
}
if !strings.Contains(fullCommand, "/api/v1/namespaces/") {
t.Errorf("Expected Kubernetes API call to /api/v1/namespaces/, got %s", fullCommand)
}
if !strings.Contains(fullCommand, "/endpoints/") {
t.Errorf("Expected Kubernetes API call to /endpoints/, got %s", fullCommand)
}
if len(initContainer.Env) == 0 {
t.Errorf("Expected environment variables to be set for namespace")
}
hasNamespace := false
for _, env := range initContainer.Env {
if env.Name == "NAMESPACE" && env.ValueFrom != nil && env.ValueFrom.FieldRef != nil {
if env.ValueFrom.FieldRef.FieldPath == "metadata.namespace" {
hasNamespace = true
break
}
}
}
if !hasNamespace {
t.Errorf("Expected NAMESPACE env var with metadata.namespace fieldRef")
}
}
// TestDependsOnLegacy verifies that the `katenary.v3/depends-on: legacy`
// label switches the generated init container back to the netcat-based
// port check instead of the default Kubernetes API polling.
func TestDependsOnLegacy(t *testing.T) {
	composeFile := `
services:
  web:
    image: nginx:1.29
    ports:
      - 80:80
    depends_on:
      - database
    labels:
      katenary.v3/depends-on: legacy
  database:
    image: mariadb:10.5
    ports:
      - 3306:3306
`
	tmpDir := setup(composeFile)
	defer teardown(tmpDir)

	currentDir, _ := os.Getwd()
	os.Chdir(tmpDir)
	defer os.Chdir(currentDir)

	output := internalCompileTest(t, "-s", webTemplateOutput)
	dt := v1.Deployment{}
	if err := yaml.Unmarshal([]byte(output), &dt); err != nil {
		t.Errorf(unmarshalError, err)
	}
	// Fatalf (not Errorf): indexing InitContainers[0] below would panic
	// if the slice were empty.
	if len(dt.Spec.Template.Spec.InitContainers) != 1 {
		t.Fatalf("Expected 1 init container, got %d", len(dt.Spec.Template.Spec.InitContainers))
	}
	initContainer := dt.Spec.Template.Spec.InitContainers[0]
	if !strings.Contains(initContainer.Image, "busybox") {
		t.Errorf("Expected busybox image, got %s", initContainer.Image)
	}
	fullCommand := strings.Join(initContainer.Command, " ")
	if !strings.Contains(fullCommand, "nc") {
		t.Errorf("Expected nc (netcat) command for legacy method, got %s", fullCommand)
	}
}
func TestHelmDependencies(t *testing.T) {
@@ -563,3 +645,192 @@ services:
t.Errorf("Expected command to be 'bar baz', got %s", strings.Join(command, " "))
}
}
// TestRestrictedRBACGeneration verifies that a non-legacy depends_on
// produces a depends-on.rbac.yaml template containing exactly three
// documents — a ServiceAccount, a least-privilege Role (core-API
// endpoints with get/list/watch only, no wildcards), and a RoleBinding
// tying them together.
func TestRestrictedRBACGeneration(t *testing.T) {
	composeFile := `
services:
  web:
    image: nginx:1.29
    ports:
      - 80:80
    depends_on:
      - database
  database:
    image: mariadb:10.5
    ports:
      - 3306:3306
`
	tmpDir := setup(composeFile)
	defer teardown(tmpDir)

	currentDir, _ := os.Getwd()
	os.Chdir(tmpDir)
	defer os.Chdir(currentDir)

	rbacOutput := internalCompileTest(t, "-s", "templates/web/depends-on.rbac.yaml")
	docs := strings.Split(rbacOutput, "---\n")
	// Filter out empty documents and strip helm template comments
	var filteredDocs []string
	for _, doc := range docs {
		if strings.TrimSpace(doc) != "" {
			// Remove '# Source:' comment lines that helm template adds
			lines := strings.Split(doc, "\n")
			var contentLines []string
			for _, line := range lines {
				if !strings.HasPrefix(strings.TrimSpace(line), "# Source:") {
					contentLines = append(contentLines, line)
				}
			}
			filteredDocs = append(filteredDocs, strings.Join(contentLines, "\n"))
		}
	}
	if len(filteredDocs) != 3 {
		t.Fatalf("Expected 3 YAML documents in RBAC file, got %d (filtered from %d)", len(filteredDocs), len(docs))
	}

	var sa corev1.ServiceAccount
	if err := yaml.Unmarshal([]byte(strings.TrimSpace(filteredDocs[0])), &sa); err != nil {
		t.Errorf("Failed to unmarshal ServiceAccount: %v", err)
	}
	if sa.Kind != "ServiceAccount" {
		t.Errorf("Expected Kind=ServiceAccount, got %s", sa.Kind)
	}
	if !strings.Contains(sa.Name, "web") {
		t.Errorf("Expected ServiceAccount name to contain 'web', got %s", sa.Name)
	}

	var role rbacv1.Role
	if err := yaml.Unmarshal([]byte(strings.TrimSpace(filteredDocs[1])), &role); err != nil {
		t.Errorf("Failed to unmarshal Role: %v", err)
	}
	if role.Kind != "Role" {
		t.Errorf("Expected Kind=Role, got %s", role.Kind)
	}
	// Fatalf (not Errorf): role.Rules[0] below would panic on an empty slice.
	if len(role.Rules) != 1 {
		t.Fatalf("Expected 1 rule in Role, got %d", len(role.Rules))
	}
	rule := role.Rules[0]
	if !contains(rule.APIGroups, "") {
		t.Error("Expected APIGroup to include core API ('')")
	}
	if !contains(rule.Resources, "endpoints") {
		t.Errorf("Expected Resource to include 'endpoints', got %v", rule.Resources)
	}
	for _, res := range rule.Resources {
		if res == "*" {
			t.Error("Role should not have wildcard (*) resource permissions")
		}
	}
	for _, verb := range rule.Verbs {
		if verb == "*" {
			t.Error("Role should not have wildcard (*) verb permissions")
		}
	}

	var rb rbacv1.RoleBinding
	if err := yaml.Unmarshal([]byte(strings.TrimSpace(filteredDocs[2])), &rb); err != nil {
		t.Errorf("Failed to unmarshal RoleBinding: %v", err)
	}
	if rb.Kind != "RoleBinding" {
		t.Errorf("Expected Kind=RoleBinding, got %s", rb.Kind)
	}
	// Fatalf (not Errorf): rb.Subjects[0] below would panic on an empty slice.
	if len(rb.Subjects) != 1 {
		t.Fatalf("Expected 1 subject in RoleBinding, got %d", len(rb.Subjects))
	}
	if rb.Subjects[0].Kind != "ServiceAccount" {
		t.Errorf("Expected Subject Kind=ServiceAccount, got %s", rb.Subjects[0].Kind)
	}
	// Helm template renders the name, so check if it contains "web"
	if !strings.Contains(rb.RoleRef.Name, "web") {
		t.Errorf("Expected RoleRef Name to contain 'web', got %s", rb.RoleRef.Name)
	}
	if rb.RoleRef.Kind != "Role" {
		t.Errorf("Expected RoleRef Kind=Role, got %s", rb.RoleRef.Kind)
	}
}
// TestDeploymentReferencesServiceAccount checks that the default
// (Kubernetes API based) depends_on wires everything together in the
// deployment: the pod spec references the generated ServiceAccount, the
// init container polls the endpoints API, the NAMESPACE env var is filled
// from the downward API, and the RBAC template file is written on disk.
func TestDeploymentReferencesServiceAccount(t *testing.T) {
	composeFile := `
services:
  web:
    image: nginx:1.29
    ports:
      - 80:80
    depends_on:
      - database
  database:
    image: mariadb:10.5
    ports:
      - 3306:3306
`
	tmpDir := setup(composeFile)
	defer teardown(tmpDir)

	currentDir, _ := os.Getwd()
	os.Chdir(tmpDir)
	defer os.Chdir(currentDir)

	output := internalCompileTest(t, "-s", "templates/web/deployment.yaml")

	var dep v1.Deployment
	if err := yaml.Unmarshal([]byte(output), &dep); err != nil {
		t.Errorf("Failed to unmarshal Deployment: %v", err)
	}

	if saName := dep.Spec.Template.Spec.ServiceAccountName; !strings.Contains(saName, "web") {
		t.Errorf("Expected ServiceAccountName to contain 'web', got %s", saName)
	}

	if len(dep.Spec.Template.Spec.InitContainers) == 0 {
		t.Fatal("Expected at least one init container for depends_on")
	}
	waiter := dep.Spec.Template.Spec.InitContainers[0]
	if waiter.Name != "wait-for-database" {
		t.Errorf("Expected init container name 'wait-for-database', got %s", waiter.Name)
	}

	cmdLine := strings.Join(waiter.Command, " ")
	if !strings.Contains(cmdLine, "wget") {
		t.Error("Expected init container to use wget for K8s API calls")
	}
	if !strings.Contains(cmdLine, "/api/v1/namespaces/") {
		t.Error("Expected init container to call /api/v1/namespaces/ endpoint")
	}
	if !strings.Contains(cmdLine, "/endpoints/") {
		t.Error("Expected init container to access /endpoints/ resource")
	}

	// The namespace must come from the downward API, not a literal value.
	nsFromFieldRef := false
	for _, env := range waiter.Env {
		if env.Name != "NAMESPACE" || env.ValueFrom == nil || env.ValueFrom.FieldRef == nil {
			continue
		}
		if env.ValueFrom.FieldRef.FieldPath == "metadata.namespace" {
			nsFromFieldRef = true
			break
		}
	}
	if !nsFromFieldRef {
		t.Error("Expected NAMESPACE env var with metadata.namespace fieldRef")
	}

	// The RBAC manifest must have been generated alongside the deployment.
	_, err := os.Stat("./chart/templates/web/depends-on.rbac.yaml")
	if os.IsNotExist(err) {
		t.Error("RBAC file depends-on.rbac.yaml should exist for service using depends_on with K8s API")
	} else if err != nil {
		t.Errorf("Unexpected error checking RBAC file: %v", err)
	}
}
// contains reports whether item is present in slice. Generic over any
// comparable element type; existing []string call sites are unchanged
// thanks to type inference.
func contains[T comparable](slice []T, item T) bool {
	return slices.Contains(slice, item)
}

View File

@@ -22,7 +22,7 @@ import (
// The Generate function will create the HelmChart object this way:
//
// - Detect the service port name or leave the port number if not found.
// - Create a deployment for each service that are not ingnore.
- Create a deployment for each service that is not ignored.
// - Create a service and ingresses for each service that has ports and/or declared ingresses.
// - Create a PVC or Configmap volumes for each volume.
// - Create init containers for each service which has dependencies to other services.
@@ -134,6 +134,12 @@ func Generate(project *types.Project) (*HelmChart, error) {
}
}
}
// set ServiceAccountName for deployments that need it
for _, d := range deployments {
d.SetServiceAccountName()
}
for _, name := range drops {
delete(deployments, name)
}
@@ -142,6 +148,11 @@ func Generate(project *types.Project) (*HelmChart, error) {
chart.setEnvironmentValuesFrom(s, deployments)
}
// generate RBAC resources for services that need K8s API access (non-legacy depends_on)
if err := chart.generateRBAC(deployments); err != nil {
logger.Fatalf("error generating RBAC: %s", err)
}
// generate configmaps with environment variables
if err := chart.generateConfigMapsAndSecrets(project); err != nil {
logger.Fatalf("error generating configmaps and secrets: %s", err)
@@ -440,6 +451,58 @@ func samePodVolume(service types.ServiceConfig, v types.ServiceVolumeConfig, dep
return false
}
// generateRBAC creates RBAC resources (ServiceAccount, Role, RoleBinding) for services that need K8s API access.
// A service needs RBAC if it has non-legacy depends_on relationships. For each such service the three
// objects are marshaled into a single multi-document YAML stream and registered as the
// "<service>/depends-on.rbac.yaml" chart template.
func (chart *HelmChart) generateRBAC(deployments map[string]*Deployment) error {
	for _, d := range deployments {
		if !d.needsServiceAccount {
			continue
		}

		sa := NewServiceAccount(*d.service, chart.Name)
		role := NewRestrictedRole(*d.service, chart.Name)
		rb := NewRestrictedRoleBinding(*d.service, chart.Name)

		// Marshal the three objects into one "---"-separated YAML document.
		objects := []struct {
			kind string
			obj  any
		}{
			{"ServiceAccount", sa.ServiceAccount},
			{"Role", role.Role},
			{"RoleBinding", rb.RoleBinding},
		}
		var buf bytes.Buffer
		for i, o := range objects {
			if i > 0 {
				buf.WriteString("---\n")
			}
			content, err := yaml.Marshal(o.obj)
			if err != nil {
				return fmt.Errorf("error marshaling %s for %s: %w", o.kind, d.service.Name, err)
			}
			buf.Write(content)
		}

		filename := d.service.Name + "/depends-on.rbac.yaml"
		chart.Templates[filename] = &ChartTemplate{
			Content:     buf.Bytes(),
			Servicename: d.service.Name,
		}
		// Log per service directly; no need to collect names in a map first.
		logger.Log(logger.IconPackage, "Creating RBAC", d.service.Name)
	}
	return nil
}
func fixContainerNames(project *types.Project) {
// fix container names to be unique
for i, service := range project.Services {

View File

@@ -36,6 +36,7 @@ const (
LabelEnvFrom Label = KatenaryLabelPrefix + "/env-from"
LabelExchangeVolume Label = KatenaryLabelPrefix + "/exchange-volumes"
LabelValuesFrom Label = KatenaryLabelPrefix + "/values-from"
LabelDependsOn Label = KatenaryLabelPrefix + "/depends-on"
)
var (

View File

@@ -355,4 +355,25 @@
DB_USER: database.MARIADB_USER
DB_PASSWORD: database.MARIADB_PASSWORD
"depends-on":
short: "Method to check if a service is ready (for depends_on)."
long: |-
When a service uses `depends_on`, Katenary creates an initContainer to wait
for the dependent service to be ready.
By default, Katenary uses the Kubernetes API to check if the service endpoint
has ready addresses. This method does not require the service to expose a port.
Set this label to `legacy` to use the old netcat method that requires a port
to be defined for the dependent service.
example: |-
web:
image: nginx
depends_on:
- database
labels:
# Use legacy netcat method (requires port)
{{ .KatenaryPrefix }}/depends-on: legacy
type: "string"
# vim: ft=gotmpl.yaml

View File

@@ -32,7 +32,7 @@ func NewRBAC(service types.ServiceConfig, appName string) *RBAC {
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Name: utils.TplName(service.Name, appName, "dependency"),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
@@ -128,6 +128,79 @@ func (r *Role) Yaml() ([]byte, error) {
}
}
// NewServiceAccount creates a new ServiceAccount from a compose service.
//
// The account name carries the "dependency" suffix so it matches the name
// referenced by Deployment.SetServiceAccountName and by the RoleBinding
// subject in NewRestrictedRoleBinding — without the suffix the pod would
// reference a ServiceAccount that is never generated.
func NewServiceAccount(service types.ServiceConfig, appName string) *ServiceAccount {
	return &ServiceAccount{
		ServiceAccount: &corev1.ServiceAccount{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ServiceAccount",
				APIVersion: "v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:        utils.TplName(service.Name, appName, "dependency"),
				Labels:      GetLabels(service.Name, appName),
				Annotations: Annotations,
			},
		},
		service: &service,
	}
}
// NewRestrictedRole creates a Role with minimal permissions for init containers.
// The role only grants read access (get/list/watch) to core-API endpoints,
// which is all the depends_on wait script needs.
func NewRestrictedRole(service types.ServiceConfig, appName string) *Role {
	meta := metav1.ObjectMeta{
		Name:        utils.TplName(service.Name, appName, "dependency"),
		Labels:      GetLabels(service.Name, appName),
		Annotations: Annotations,
	}
	// Least-privilege rule: read-only access to endpoints in the core API group.
	rules := []rbacv1.PolicyRule{
		{
			APIGroups: []string{""},
			Resources: []string{"endpoints"},
			Verbs:     []string{"get", "list", "watch"},
		},
	}
	return &Role{
		Role: &rbacv1.Role{
			TypeMeta: metav1.TypeMeta{
				Kind:       "Role",
				APIVersion: "rbac.authorization.k8s.io/v1",
			},
			ObjectMeta: meta,
			Rules:      rules,
		},
		service: &service,
	}
}
// NewRestrictedRoleBinding creates a RoleBinding that binds the restricted role to the ServiceAccount.
// The binding, the subject ServiceAccount, and the referenced Role all share
// the same "dependency"-suffixed template name.
func NewRestrictedRoleBinding(service types.ServiceConfig, appName string) *RoleBinding {
	name := utils.TplName(service.Name, appName, "dependency")
	binding := &rbacv1.RoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       "RoleBinding",
			APIVersion: "rbac.authorization.k8s.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Labels:      GetLabels(service.Name, appName),
			Annotations: Annotations,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      name,
				Namespace: "{{ .Release.Namespace }}",
			},
		},
		RoleRef: rbacv1.RoleRef{
			Kind:     "Role",
			Name:     name,
			APIGroup: "rbac.authorization.k8s.io",
		},
	}
	return &RoleBinding{
		RoleBinding: binding,
		service:     &service,
	}
}
// ServiceAccount is a kubernetes ServiceAccount.
type ServiceAccount struct {
*corev1.ServiceAccount

View File

@@ -41,16 +41,21 @@ func Parse(profiles []string, envFiles []string, dockerComposeFile ...string) (*
}
}
options, err := cli.NewProjectOptions(dockerComposeFile,
opts := []cli.ProjectOptionsFn{
cli.WithProfiles(profiles),
cli.WithInterpolation(true),
cli.WithDefaultConfigPath,
cli.WithEnvFiles(envFiles...),
cli.WithOsEnv,
cli.WithDotEnv,
cli.WithNormalization(true),
cli.WithResolvedPaths(false),
)
}
if len(dockerComposeFile) == 0 {
opts = append(opts, cli.WithDefaultConfigPath)
}
options, err := cli.NewProjectOptions(dockerComposeFile, opts...)
if err != nil {
return nil, err
}