Go to Katenary V3

This is the next-gen of Katenary
2023-12-06 15:24:02 +01:00
parent c37bde487b
commit 475a025d9e
132 changed files with 6410 additions and 4621 deletions

1
.gitignore vendored

@@ -1,3 +1,4 @@
.venv
dist/*
.cache/*
chart/*

9
.gitmodules vendored Normal file

@@ -0,0 +1,9 @@
[submodule "test/bats"]
path = test/bats
url = https://github.com/bats-core/bats-core.git
[submodule "test/test_helper/bats-support"]
path = test/test_helper/bats-support
url = https://github.com/bats-core/bats-support.git
[submodule "test/test_helper/bats-assert"]
path = test/test_helper/bats-assert
url = https://github.com/bats-core/bats-assert.git

132
Makefile

@@ -4,27 +4,51 @@ VERSION=$(shell git describe --exact-match --tags $(CUR_SHA) 2>/dev/null || echo
CTN:=$(shell which podman 2>&1 1>/dev/null && echo "podman" || echo "docker")
PREFIX=~/.local
GOVERSION=1.21
GO=container
OUT=katenary
BLD_CMD=go build -ldflags="-X 'main.Version=$(VERSION)'" -o $(OUT) ./cmd/katenary/*.go
BLD_CMD=go build -ldflags="-X 'katenary/generator.Version=$(VERSION)'" -o $(OUT) ./cmd/katenary
GOOS=linux
GOARCH=amd64
SIGNER=metal3d@gmail.com
BUILD_IMAGE=docker.io/golang:1.18-alpine
BUILD_IMAGE=docker.io/golang:$(GOVERSION)-alpine
# SHELL=/bin/bash
.PHONY: help clean build
# List of source files
SOURCES=$(wildcard ./*.go ./*/*.go ./*/*/*.go)
# List of binaries to build and sign
BINARIES=dist/katenary-linux-amd64 dist/katenary-linux-arm64 dist/katenary.exe dist/katenary-darwin-amd64 dist/katenary-freebsd-amd64 dist/katenary-freebsd-arm64
# List of signatures to build
ASC_BINARIES=$(patsubst %,%.asc,$(BINARIES))
# defaults
SHELL := bash
# strict mode
.SHELLFLAGS := -eu -o pipefail -c
# One session per target
.ONESHELL:
.DELETE_ON_ERROR:
MAKEFLAGS += --warn-undefined-variables
MAKEFLAGS += --no-builtin-rules
.PHONY: help clean build install
all: build
help:
@cat <<EOF
@cat <<EOF | fold -s -w 80
=== HELP ===
To avoid having to install Go, the build is done with podman or docker.
You can use:
Installing (you can use local Go by setting GO=local):
# use podman or docker to build
$$ make install
# or use local Go
$$ make install GO=local
This will build and install katenary into PREFIX/bin (default PREFIX is $(PREFIX))
To change the PREFIX to somewhere where only root or sudo users can save the binary, it is recommended to build before install:
To change the PREFIX to somewhere only root or sudo users can write to, it is recommended to build before installing; once again, you can use local Go by setting GO=local:
$$ make build
$$ sudo make install PREFIX=/usr/local
@@ -47,21 +71,40 @@ help:
$$ make build-all
EOF
## Standard build
build: pull katenary
build-all:
rm -f dist/*
$(MAKE) _build-all
_build-all: pull dist dist/katenary-linux-amd64 dist/katenary-linux-arm64 dist/katenary.exe dist/katenary-darwin-amd64 dist/katenary-freebsd-amd64 dist/katenary-freebsd-arm64
pull:
ifneq ($(GO),local)
@echo -e "\033[1;32mPulling $(BUILD_IMAGE) docker image\033[0m"
@$(CTN) pull $(BUILD_IMAGE)
endif
dist:
katenary: $(SOURCES) Makefile go.mod go.sum
ifeq ($(GO),local)
@echo "=> Build on host using go"
else
@echo "=> Build in container using" $(CTN)
endif
echo $(BLD_CMD)
ifeq ($(GO),local)
$(BLD_CMD)
else ifeq ($(CTN),podman)
@podman run -e CGO_ENABLED=0 -e GOOS=$(GOOS) -e GOARCH=$(GOARCH) \
--rm -v $(PWD):/go/src/katenary:z -w /go/src/katenary --userns keep-id -it $(BUILD_IMAGE) $(BLD_CMD)
else
@docker run -e CGO_ENABLED=0 -e GOOS=$(GOOS) -e GOARCH=$(GOARCH) \
--rm -v $(PWD):/go/src/katenary:z -w /go/src/katenary --user $(shell id -u):$(shell id -g) -e HOME=/tmp -it $(BUILD_IMAGE) $(BLD_CMD)
endif
echo "=> Stripping if possible"
strip $(OUT) 2>/dev/null || echo "=> No strip available"
## Release build
dist: prepare $(BINARIES) $(ASC_BINARIES)
prepare: pull
mkdir -p dist
dist/katenary-linux-amd64:
@@ -69,7 +112,6 @@ dist/katenary-linux-amd64:
@echo -e "\033[1;32mBuilding katenary $(VERSION) for linux-amd64...\033[0m"
$(MAKE) katenary GOOS=linux GOARCH=amd64 OUT=$@
dist/katenary-linux-arm64:
@echo
@echo -e "\033[1;32mBuilding katenary $(VERSION) for linux-arm...\033[0m"
@@ -95,29 +137,15 @@ dist/katenary-freebsd-arm64:
@echo -e "\033[1;32mBuilding katenary $(VERSION) for freebsd-arm64...\033[0m"
$(MAKE) katenary GOOS=freebsd GOARCH=arm64 OUT=$@
katenary: $(wildcard */*.go Makefile go.mod go.sum)
ifeq ($(GO),local)
@echo "=> Build in host using go"
else
@echo "=> Build in container using" $(CTN)
endif
echo $(BLD_CMD)
ifeq ($(GO),local)
$(BLD_CMD)
else ifeq ($(CTN),podman)
@podman run -e CGO_ENABLED=0 -e GOOS=$(GOOS) -e GOARCH=$(GOARCH) \
--rm -v $(PWD):/go/src/katenary:z -w /go/src/katenary --userns keep-id -it $(BUILD_IMAGE) $(BLD_CMD)
else
@docker run -e CGO_ENABLED=0 -e GOOS=$(GOOS) -e GOARCH=$(GOARCH) \
--rm -v $(PWD):/go/src/katenary:z -w /go/src/katenary --user $(shell id -u):$(shell id -g) -e HOME=/tmp -it $(BUILD_IMAGE) $(BLD_CMD)
endif
echo "=> Stripping if possible"
strip $(OUT) 2>/dev/null || echo "=> No strip available"
gpg-sign:
rm -f dist/*.asc
$(MAKE) $(ASC_BINARIES)
dist/%.asc: dist/%
gpg --armor --detach-sign --default-key $(SIGNER) $< &>/dev/null || exit 1
install: build
cp katenary $(PREFIX)/bin/katenary
install -Dm755 katenary $(PREFIX)/bin/katenary
uninstall:
rm -f $(PREFIX)/bin/katenary
@@ -131,8 +159,6 @@ test:
@echo -e "\033[1;33mTesting katenary $(VERSION)...\033[0m"
go test -v ./...
.ONESHELL:
push-release: build-all
@rm -f release.id
# read personal access token from .git-credentials
@@ -154,3 +180,37 @@ push-release: build-all
https://uploads.github.com/repos/metal3d/katenary/releases/$$(cat release.id)/assets?name=$$(basename $$i)
done
@rm -f release.id
__label_doc:
@echo "=> Generating labels doc..."
# short label doc
go run ./cmd/katenary help-labels -m | \
sed -i '
/START_LABEL_DOC/,/STOP_LABEL_DOC/{/<!--/!d};
/START_LABEL_DOC/,/STOP_LABEL_DOC/r/dev/stdin
' doc/docs/labels.md
# detailed label doc
go run ./cmd/katenary help-labels -am | sed 's/^##/###/' | \
sed -i '
/START_DETAILED_DOC/,/STOP_DETAILED_DOC/{/<!--/!d};
/START_DETAILED_DOC/,/STOP_DETAILED_DOC/r/dev/stdin
' doc/docs/labels.md
echo "=> Generating Code documentation..."
PACKAGES=$$(for f in $$(find . -name "*.go" -type f); do dirname $$f; done | sort -u)
for pack in $$PACKAGES; do
echo "-> Generating doc for $$pack"
#gomarkdoc -o doc/docs/packages/$$pack.md $$pack
gomarkdoc -f azure-devops $$pack | pandoc -t gfm -o doc/docs/packages/$$pack.md
# drop the Index section without removing the title
# - remove the Index section, but keep the following heading
sed -i '/^## Index/,/^##/ { /## Index/d; /^##/! d }' doc/docs/packages/$$pack.md
# fixes for markdown problems
# - headings contain \*, replace with *
sed -i 's/\\\*/\*/g' doc/docs/packages/$$pack.md
## parentheses in headings are escaped, replace with unescaped
sed -i 's/\\(/\(/g' doc/docs/packages/$$pack.md
sed -i 's/\\)/\)/g' doc/docs/packages/$$pack.md
## lists are badly indented with 2 spaces, replace with 4
done


@@ -2,16 +2,6 @@
<img src="./misc/logo.png" alt="Katenary Logo" style="max-width: 90%" align="center"/>
</div>
> Warning!
> Katenary will soon move to release v3. This is a full rewrite of the tool, using the official go-compose and kubernetes libraries to generate objects.
> The current state of the source code has become unmaintainable and too complex to fix, so I decided to revise and recreate the tool. This will change
> some commands and the labels to use.
>
> The current v2 version will be frozen to the current state.
>
> No panic, the v3 detects the v2 syntax and will not break your helm chart.
Katenary is a tool that helps transform `docker-compose` files into a working Helm Chart for Kubernetes.
> **Important Note:** Katenary is a tool to help build a Helm Chart from a docker-compose file, but docker-compose doesn't offer as many features as Kubernetes does. So, we strongly recommend using Katenary as a "bootstrap" tool and then manually enhancing the generated helm chart.


@@ -2,152 +2,229 @@ package main
import (
"fmt"
"katenary/generator/writers"
"katenary/helm"
"katenary/update"
"strconv"
"katenary/generator"
"katenary/utils"
"os"
"strings"
"github.com/compose-spec/compose-go/cli"
"github.com/spf13/cobra"
)
var Version = "master" // changed at compile time
const longHelp = `Katenary is a tool to convert compose files to Helm Charts.
var longHelp = `Katenary aims to be a tool to convert docker-compose files to Helm Charts.
It will create deployments, services, volumes, secrets, and ingress resources.
But it will also create initContainers based on depend_on, healthcheck, and other features.
It's not magical, sometimes you'll need to fix the generated charts.
The general way to use it is to call one of these commands:
katenary convert
katenary convert -c docker-compose.yml
katenary convert -c docker-compose.yml -o ./charts
In case of doubt, check the help of each command using:
katenary <command> --help
or
"katenary help <command>"
Each [command] and subcommand has a "help" command and a "--help" flag to show more information.
`
func init() {
// apply the version to the "update" package
update.Version = Version
}
func main() {
// The base command
rootCmd := &cobra.Command{
Use: "katenary",
Long: longHelp,
Short: "Katenary is a tool to convert docker-compose files to Helm Charts",
}
rootCmd.Example = ` katenary convert -c docker-compose.yml -o ./charts`
// to display the version
versionCmd := &cobra.Command{
Use: "version",
Short: "Display version",
Run: func(c *cobra.Command, args []string) { c.Println(Version) },
}
// convert command, need some flags
var composeFiles *[]string
convertCmd := &cobra.Command{
Use: "convert",
Short: "Convert docker-compose to helm chart",
Long: "Convert docker-compose to helm chart. The resulting helm chart will be in the current directory/" +
ChartsDir + "/" + AppName +
".\nThe appversion will be generated that way:\n" +
"- if it's in a git project, it takes git version or tag\n" +
"- if it's not defined, so the version will be get from the --app-version flag \n" +
"- if it's not defined, so the 0.0.1 version is used",
Run: func(c *cobra.Command, args []string) {
force := c.Flag("force").Changed
appversion := c.Flag("app-version").Value.String()
appName := c.Flag("app-name").Value.String()
chartVersion := c.Flag("chart-version").Value.String()
chartDir := c.Flag("output-dir").Value.String()
indentation, err := strconv.Atoi(c.Flag("indent-size").Value.String())
if err != nil {
writers.IndentSize = indentation
}
Convert(*composeFiles, appversion, appName, chartDir, chartVersion, force)
},
}
composeFiles = convertCmd.Flags().StringArrayP(
"compose-file", "c", []string{ComposeFile}, "compose file to convert, can be use several times to override previous file. Order is important!")
convertCmd.Flags().BoolP(
"force", "f", false, "force overwrite of existing output files")
convertCmd.Flags().StringP(
"app-version", "a", AppVersion, "app version")
convertCmd.Flags().StringP(
"chart-version", "v", ChartVersion, "chart version")
convertCmd.Flags().StringP(
"app-name", "n", AppName, "application name")
convertCmd.Flags().StringP(
"output-dir", "o", ChartsDir, "chart directory")
convertCmd.Flags().IntP(
"indent-size", "i", 2, "set the indent size of the YAML files")
// show possible labels to set in docker-compose file
showLabelsCmd := &cobra.Command{
Use: "show-labels",
Short: "Show labels of a resource",
Run: func(c *cobra.Command, args []string) {
c.Println(helm.GetLabelsDocumentation())
},
}
// Update the binary to the latest version
updateCmd := &cobra.Command{
Use: "upgrade",
Short: "Upgrade katenary to the latest version if available",
Run: func(c *cobra.Command, args []string) {
version, assets, err := update.CheckLatestVersion()
if err != nil {
c.Println(err)
return
}
c.Println("Updating to version: " + version)
err = update.DownloadLatestVersion(assets)
if err != nil {
c.Println(err)
return
}
c.Println("Update completed")
},
}
rootCmd.Version = generator.Version
rootCmd.CompletionOptions.DisableDescriptions = false
rootCmd.CompletionOptions.DisableNoDescFlag = false
rootCmd.AddCommand(
versionCmd,
convertCmd,
showLabelsCmd,
updateCmd,
generateCompletionCommand(rootCmd.Name()),
generateVersionCommand(),
generateConvertCommand(),
generateHashComposefilesCommand(),
generateLabelHelpCommand(),
)
// in parallel, check if the current katenary version is the latest
ch := make(chan string)
go func() {
version, _, err := update.CheckLatestVersion()
if err != nil {
ch <- ""
rootCmd.Execute()
}
const completionHelp = `To load completions:
Bash:
# Add this line in your ~/.bashrc or ~/.bash_profile file
$ source <(%[1]s completion bash)
# Or, you can load completions for each users session. Execute once:
# Linux:
$ %[1]s completion bash > /etc/bash_completion.d/%[1]s
# macOS:
$ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
Zsh:
# If shell completion is not already enabled in your environment,
# you will need to enable it. You can execute the following once:
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
# To load completions for each session, execute once:
$ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
# You will need to start a new shell for this setup to take effect.
fish:
$ %[1]s completion fish | source
# To load completions for each session, execute once:
$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
PowerShell:
PS> %[1]s completion powershell | Out-String | Invoke-Expression
# To load completions for every new session, run:
PS> %[1]s completion powershell > %[1]s.ps1
# and source this file from your PowerShell profile.
`
func generateCompletionCommand(name string) *cobra.Command {
bashV1 := false
cmd := &cobra.Command{
Use: "completion",
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
Short: "Generates completion scripts",
Long: fmt.Sprintf(completionHelp, name),
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
cmd.Help()
return
}
if Version != version {
ch <- fmt.Sprintf("\x1b[33mNew version available: " +
version +
" - to auto upgrade katenary, you can execute: katenary upgrade\x1b[0m\n")
switch args[0] {
case "bash":
// get the bash version
if cmd.Flags().Changed("bash-v1") {
cmd.Root().GenBashCompletion(os.Stdout)
return
}
cmd.Root().GenBashCompletionV2(os.Stdout, true)
case "zsh":
cmd.Root().GenZshCompletion(os.Stdout)
case "fish":
cmd.Root().GenFishCompletion(os.Stdout, true)
case "powershell":
cmd.Root().GenPowerShellCompletion(os.Stdout)
}
},
}
}()
// Execute the command
finalize := make(chan error)
go func() {
finalize <- rootCmd.Execute()
}()
// add a flag to force bash completion v1
cmd.Flags().Bool("bash-v1", bashV1, "Force bash completion v1")
// Wait for both goroutines to finish
if err := <-finalize; err != nil {
return cmd
}
func generateConvertCommand() *cobra.Command {
force := false
outputDir := "./chart"
dockerComposeFile := make([]string, 0)
profiles := make([]string, 0)
helmdepUpdate := false
var appVersion *string
givenAppVersion := ""
chartVersion := "0.1.0"
convertCmd := &cobra.Command{
Use: "convert",
Short: "Converts a docker-compose file to a Helm Chart",
Run: func(cmd *cobra.Command, args []string) {
if givenAppVersion != "" {
appVersion = &givenAppVersion
}
generator.Convert(generator.ConvertOptions{
Force: force,
OutputDir: outputDir,
Profiles: profiles,
HelmUpdate: helmdepUpdate,
AppVersion: appVersion,
ChartVersion: chartVersion,
}, dockerComposeFile...)
},
}
convertCmd.Flags().BoolVarP(&force, "force", "f", force, "Force the overwrite of the chart directory")
convertCmd.Flags().BoolVarP(&helmdepUpdate, "helm-update", "u", helmdepUpdate, "Update helm dependencies if helm is installed")
convertCmd.Flags().StringSliceVarP(&profiles, "profile", "p", profiles, "Specify the profiles to use")
convertCmd.Flags().StringVarP(&outputDir, "output-dir", "o", outputDir, "Specify the output directory")
convertCmd.Flags().StringSliceVarP(&dockerComposeFile, "compose-file", "c", cli.DefaultFileNames, "Specify an alternate compose files - can be specified multiple times or use coma to separate them")
convertCmd.Flags().StringVarP(&givenAppVersion, "app-version", "a", "", "Specify the app version (in Chart.yaml)")
convertCmd.Flags().StringVarP(&chartVersion, "chart-version", "v", chartVersion, "Specify the chart version (in Chart.yaml)")
return convertCmd
}
func generateVersionCommand() *cobra.Command {
return &cobra.Command{
Use: "version",
Short: "Print the version number of Katenary",
Run: func(cmd *cobra.Command, args []string) {
println(generator.Version)
},
}
}
func generateLabelHelpCommand() *cobra.Command {
markdown := false
all := false
cmd := &cobra.Command{
Use: "help-labels [label]",
Short: "Print the labels help for all or a specific label",
Long: `Print the labels help for all or a specific label
If no label is specified, the help for all labels is printed.
If a label is specified, the help for this label is printed.
The name of the label must be specified without the prefix ` + generator.KATENARY_PREFIX + `.
e.g.
katenary help-labels
katenary help-labels ingress
katenary help-labels map-env
`,
ValidArgs: generator.GetLabelNames(),
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 0 {
fmt.Println(generator.GetLabelHelpFor(args[0], markdown))
return
}
if all {
// show the help for all labels
l := len(generator.GetLabelNames())
for i, label := range generator.GetLabelNames() {
fmt.Println(generator.GetLabelHelpFor(label, markdown))
if !markdown && i < l-1 {
fmt.Println(strings.Repeat("-", 80))
}
}
return
}
fmt.Println(generator.GetLabelHelp(markdown))
},
}
cmd.Flags().BoolVarP(&markdown, "markdown", "m", markdown, "Use the markdown format")
cmd.Flags().BoolVarP(&all, "all", "a", all, "Print the full help for all labels")
return cmd
}
func generateHashComposefilesCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "hash-composefiles [composefile]",
Short: "Print the hash of the composefiles",
Long: `Print the hash of the composefiles
If no composefile is specified, the hash of all composefiles is printed.`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 0 {
if hash, err := utils.HashComposefiles(args); err != nil {
fmt.Println(err)
} else {
fmt.Println(hash)
}
fmt.Print(<-ch)
return
}
},
}
return cmd
}


@@ -1,149 +0,0 @@
package main
import (
"errors"
"fmt"
"katenary/compose"
"katenary/generator"
"os"
"os/exec"
"path/filepath"
"strings"
)
var (
composeFiles = []string{"compose.yml", "compose.yaml", "docker-compose.yaml", "docker-compose.yml"}
ComposeFile = ""
AppName = "MyApp"
ChartsDir = "chart"
AppVersion = "0.0.1"
ChartVersion = "0.1.0"
)
func init() {
FindComposeFile()
SetAppName()
SetAppVersion()
}
func FindComposeFile() bool {
for _, file := range composeFiles {
if _, err := os.Stat(file); err == nil {
ComposeFile = file
return true
}
}
return false
}
// SetAppName sets the application name from the current directory name.
func SetAppName() {
wd, err := os.Getwd()
if err != nil {
return
}
AppName = filepath.Base(wd)
if AppName == "" {
AppName = "MyApp"
}
}
// SetAppVersion set the AppVersion variable to the git version/tag
func SetAppVersion() {
AppVersion, _ = detectGitVersion()
}
// Try to detect the git version/tag.
func detectGitVersion() (string, error) {
defaulVersion := "0.0.1"
// Check if .git directory exists
if s, err := os.Stat(".git"); err != nil {
// .git should be a directory
return defaulVersion, errors.New("no git repository found")
} else if !s.IsDir() {
// .git should be a directory
return defaulVersion, errors.New(".git is not a directory")
}
// check if "git" executable is callable
if _, err := exec.LookPath("git"); err != nil {
return defaulVersion, errors.New("git executable not found")
}
// get the latest commit hash
if out, err := exec.Command("git", "log", "-n1", "--pretty=format:%h").Output(); err == nil {
latestCommit := strings.TrimSpace(string(out))
// then get the current branch/tag
out, err := exec.Command("git", "branch", "--show-current").Output()
if err != nil {
return defaulVersion, errors.New("git branch --show-current failed")
} else {
currentBranch := strings.TrimSpace(string(out))
// finally, check if the current tag (if exists) correspond to the current commit
// git describe --exact-match --tags <latestCommit>
out, err := exec.Command("git", "describe", "--exact-match", "--tags", latestCommit).Output()
if err == nil {
return strings.TrimSpace(string(out)), nil
} else {
return currentBranch + "-" + latestCommit, nil
}
}
}
return defaulVersion, errors.New("git log failed")
}
func Convert(composeFile []string, appVersion, appName, chartDir, chartVersion string, force bool) {
if len(composeFile) == 0 {
fmt.Println("No compose file given")
return
}
composeFiles := composeFile
ComposeFile = composeFiles[0]
for _, cf := range composeFiles {
if _, err := os.Stat(cf); err != nil {
fmt.Printf("Compose file %s not found\n", cf)
return
}
}
// Parse the compose file now
p := compose.NewParser(composeFiles)
p.Parse(appName)
dirname := filepath.Join(chartDir, appName)
if _, err := os.Stat(dirname); err == nil && !force {
response := ""
for response != "y" && response != "n" {
response = "n"
fmt.Printf(""+
"The %s directory already exists, it will be \x1b[31;1mremoved\x1b[0m!\n"+
"Do you really want to continue? [y/N]: ", dirname)
fmt.Scanf("%s", &response)
response = strings.ToLower(response)
}
if response == "n" {
fmt.Println("Cancelled")
os.Exit(0)
}
}
// cleanup and create the chart directory (until "templates")
if err := os.RemoveAll(dirname); err != nil {
fmt.Printf("Error removing %s: %s\n", dirname, err)
os.Exit(1)
}
// create the templates directory
templatesDir := filepath.Join(dirname, "templates")
if err := os.MkdirAll(templatesDir, 0755); err != nil {
fmt.Printf("Error creating %s: %s\n", templatesDir, err)
os.Exit(1)
}
// start generator
generator.Generate(p, Version, appName, appVersion, chartVersion, ComposeFile, dirname)
}


@@ -1,96 +0,0 @@
package compose
import (
"io/ioutil"
"log"
"os"
"path/filepath"
"github.com/compose-spec/compose-go/cli"
"github.com/compose-spec/compose-go/types"
)
const (
ICON_EXCLAMATION = "❕"
)
// Parser is a docker-compose parser.
type Parser struct {
Data *types.Project
temporary *string
}
var (
Appname = ""
CURRENT_DIR, _ = os.Getwd()
)
// NewParser create a Parser and parse the file given in filename. If filename is empty, we try to parse the content[0] argument that should be a valid YAML content.
func NewParser(filename []string, content ...string) *Parser {
p := &Parser{}
if len(content) > 0 { // mainly for the tests...
dir := filepath.Dir(filename[0])
err := os.MkdirAll(dir, 0755)
if err != nil {
log.Fatal(err)
}
p.temporary = &dir
ioutil.WriteFile(filename[0], []byte(content[0]), 0644)
cli.DefaultFileNames = filename
}
// if filename is not in cli Default files, add it
if len(filename) > 0 {
found := false
for _, defaultFileName := range cli.DefaultFileNames {
for _, givenFileName := range filename {
if defaultFileName == givenFileName {
found = true
break
}
}
}
// add the file at first position
if !found {
cli.DefaultFileNames = append([]string{filename[0]}, cli.DefaultFileNames...)
}
if len(filename) > 1 {
cli.DefaultOverrideFileNames = append(filename[1:], cli.DefaultOverrideFileNames...)
}
}
return p
}
// Parse using compose-go parser, adapt a bit the Project and set Appname.
func (p *Parser) Parse(appname string) {
// Reminder:
// - set Appname
// - loas services
options, err := cli.NewProjectOptions(nil,
cli.WithDefaultConfigPath,
cli.WithNormalization(true),
cli.WithInterpolation(true),
cli.WithResolvedPaths(true),
)
if err != nil {
log.Fatal(err)
}
proj, err := cli.ProjectFromOptions(options)
if err != nil {
log.Fatal("Failed to create project", err)
}
Appname = proj.Name
p.Data = proj
CURRENT_DIR = p.Data.WorkingDir
}
func GetCurrentDir() string {
return CURRENT_DIR
}

72
doc/docs/coding.md Normal file

@@ -0,0 +1,72 @@
# How Katenary works behind the scene
This section is for developers who want to contribute to Katenary. Here we describe how it works and the expected principles.
## A few important points
Katenary is developed in Go. The version currently supported is 1.20. For reasons of readability, the `any` type is preferred to `interface{}`.
Since version v3, Katenary uses, in addition to `go-compose`, the `k8s` library to generate objects that are guaranteed to work before transformation. Katenary adds Helm syntax entries to add loops, transformations and conditions.
We really try to follow best practices and code principles. But Katenary needs a lot of workarounds and string manipulation during the process. There are also some drawbacks to using the standard k8s packages, which perform a lot of type checks when generating the objects, so we need to finalize the values after object generation.
**This makes the coding a bit harder than simply converting from YAML to YAML.**
> If Katenary only generated YAML objects, the algorithms would be much simpler and would require less generation work.
## General principle
During conversion, the `generator` package is primarily responsible for creating "objects". The principle is to generate one `Deployment` per `compose` service. If the container coming from "compose" exposes ports (explicitly), then a service is created.
If the declaration of a container is to be integrated into another pod (via the `same-pod` label), this `Deployment` and its associated service are still created. They are deleted last, once the merge has been completed.
## Conversion in "`generator`" package
The `generator` package is where object struct are defined, and where the `Generate()` function is written.
The generation is made by using a `HelmChart` object:
```golang
chart := NewChart(appName string)
```
Then, several passes detect the "main app version" (the tag of the main service image), bootstrap the ports declared in labels, and manage the links that bind containers into one pod...
Then, a loop basically makes this:
```golang
for _, service := range project.Services {
dep := NewDeployment(service)
y, _ := dep.Yaml()
chart.Templates[dep.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
}
```
**A lot** of string manipulation is done by each `Yaml()` method. This is where you find the complex and impacting operations. The `Yaml()` methods **don't return valid YAML content**: they return Helm Chart Yaml content with template conditions, values and calls to helper templates.
> The `Yaml()` methods, in each object, need contribution, help, fixes, enhancements...
> They work, but there is a lot of complexity. Please, create issues, pull-requests and conversation in the GitHub repository.
The final step, before sending all templates to the chart, is to bind the containers into the same pod where the `same-pod` label specifies it (see the sketch after the list below).
For each source container linked to the destination:
- we get the deployment of the source
- we copy the container to the destination deployment
- we get the associated service (if any)
- we then copy the service port to the destination service
- we finally remove the source service and deployment
> The configmap, secrets, variables... are kept.
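Below is a minimal, hedged sketch of that merge step using the standard `k8s.io/api` types. The function name `mergeSamePod` and its signature are illustrative only, not Katenary's actual code:

```golang
package sketch

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// mergeSamePod copies the containers and volumes of the source deployment
// into the target one, and moves the exposed ports to the target service.
// The caller is then expected to drop the source deployment and service.
func mergeSamePod(target, source *appsv1.Deployment, targetSvc, sourceSvc *corev1.Service) {
	// copy the containers from the source pod template into the target pod
	target.Spec.Template.Spec.Containers = append(
		target.Spec.Template.Spec.Containers,
		source.Spec.Template.Spec.Containers...,
	)
	// the volumes (and the volume mounts carried by the containers) follow
	target.Spec.Template.Spec.Volumes = append(
		target.Spec.Template.Spec.Volumes,
		source.Spec.Template.Spec.Volumes...,
	)
	// expose the source service ports on the target service, if both exist
	if targetSvc != nil && sourceSvc != nil {
		targetSvc.Spec.Ports = append(targetSvc.Spec.Ports, sourceSvc.Spec.Ports...)
	}
}
```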
It finally computes the `helper` file.
## Conversion command
The `generator` works the same as described above, but the "convert" command performs some final steps (a minimal serialization sketch follows the list):
- generate `values.yaml` and `Chart.yaml` files from the `HelmChart` object
- add comments to the `values.yaml` files
- add comments to the `Chart.yaml` files
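As a rough illustration of that write step (not the actual implementation), the chart metadata can be serialized with `gopkg.in/yaml.v3`, which only exports the fields carrying yaml tags; the helper name and output path are assumptions:

```golang
package sketch

import (
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"

	"katenary/generator"
)

// writeChartYaml dumps the chart metadata (name, version, appVersion, ...)
// to <outDir>/Chart.yaml. Templates, values and helpers are written separately.
func writeChartYaml(chart *generator.HelmChart, outDir string) error {
	data, err := yaml.Marshal(chart)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(outDir, "Chart.yaml"), data, 0o644)
}
```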

15
doc/docs/dependencies.md Normal file

@@ -0,0 +1,15 @@
# Why those dependencies?
Katenary uses `compose-go` and several kubernetes official packages.
- `github.com/compose-spec/compose-go`: to parse compose files. It ensures that:
- the project respects the "compose" specification
- katenary uses the "compose" struct exactly the same way that podman-compose or docker does
- `github.com/spf13/cobra`: to parse command line arguments, subcommands and flags. It also generates completion for bash, zsh, fish and powershell.
- `github.com/thediveo/netdb`: to get the standard names of a service from its port number
- `gopkg.in/yaml.v3`:
- to generate `Chart.yaml` and `values.yaml` files (only)
- to parse Katenary labels in the compose file
- `k8s.io/api` and `k8s.io/apimachinery` to create Kubernetes objects
- `sigs.k8s.io/yaml`: to generate Katenary yaml files


@@ -1,358 +1,384 @@
# Using labels
# Labels documentation
Katenary proposes labels to specify adaptation to provide to the Helm Chart. All labels are declared in the help message using:
Katenary proposes labels to set in `compose.yaml` files (or override files) to configure the Helm Chart generation. Because it is sometimes needed to have structured values, it is necessary to use the Yaml syntax. While compose labels are strings, we can use `|` to provide multiline Yaml values.
```text
$ katenary show-labels
Katenary will try to Unmarshal these labels.
# Labels
katenary.io/ignore : ignore the container, it will not yied any object in the helm chart (bool)
katenary.io/secret-vars : secret variables to push on a secret file (coma separated)
katenary.io/secret-envfiles : set the given file names as a secret instead of configmap (coma separated)
katenary.io/mapenv : map environment variable to a template string (yaml style, object)
katenary.io/ports : set the ports to assign on the container in pod + expose as a service (coma separated)
katenary.io/container-ports : set the ports to assign on the contaienr in pod but avoid service (coma separated)
katenary.io/ingress : set the port to expose in an ingress (coma separated)
katenary.io/configmap-volumes : specifies that the volumes points on a configmap (coma separated)
katenary.io/same-pod : specifies that the pod should be deployed in the same pod than the
given service name (string)
katenary.io/volume-from : specifies that the volumes to be mounted from the given service (yaml style)
katenary.io/empty-dirs : specifies that the given volume names should be "emptyDir" instead of
persistentVolumeClaim (coma separated)
katenary.io/crontabs : specifies a cronjobs to create (yaml style, array) - this will create a
cronjob, a service account, a role and a rolebinding to start the command with "kubectl"
The form is the following:
- command: the command to run
schedule: the schedule to run the command (e.g. "@daily" or "*/1 * * * *")
image: the image to use for the command (default to "bitnami/kubectl")
allPods: true if you want to run the command on all pods (default to false)
katenary.io/healthcheck : specifies that the container should be monitored by a healthcheck,
**it overrides the docker-compose healthcheck**.
You can use these form of label values:
-> http://[ignored][:port][/path] to specify an http healthcheck
-> tcp://[ignored]:port to specify a tcp healthcheck
-> other string is condidered as a "command" healthcheck
```
## Label list and types
## healthcheck
<!-- START_LABEL_DOC : do not remove this tag !-->
| Label name | Description | Type |
| ----------------------------- | ------------------------------------------------------ | --------------------- |
| `katenary.v3/configmap-files` | Add files to the configmap. | list of strings |
| `katenary.v3/cronjob` | Create a cronjob from the service. | object |
| `katenary.v3/dependencies` | Add Helm dependencies to the service. | list of objects |
| `katenary.v3/description` | Description of the service | string |
| `katenary.v3/env-from` | Add environment variables from another service. | list of strings |
| `katenary.v3/health-check` | Health check to be added to the deployment. | object |
| `katenary.v3/ignore` | Ignore the service | bool |
| `katenary.v3/ingress` | Ingress rules to be added to the service. | object |
| `katenary.v3/main-app` | Mark the service as the main app. | bool |
| `katenary.v3/map-env` | Map env vars from the service to the deployment. | object |
| `katenary.v3/ports` | Ports to be added to the service. | list of uint32 |
| `katenary.v3/same-pod` | Move the same-pod deployment to the target deployment. | string |
| `katenary.v3/secrets` | Env vars to be set as secrets. | list of string |
| `katenary.v3/values` | Environment variables to be added to the values.yaml | list of string or map |
HealthCheck label defines how to make LivenessProbe on Kubernetes.
<!-- STOP_LABEL_DOC : do not remove this tag !-->
## Detailed description
<!-- START_DETAILED_DOC : do not remove this tag !-->
### katenary.v3/configmap-files
Add files to the configmap.
**Type**: `list of strings`
It makes a file or directory to be converted to one or more ConfigMaps
and mounted in the pod. The file or directory is relative to the
service directory.
If it is a directory, all files inside it are added to the ConfigMap.
If the directory has subdirectories, one configmap per subpath is created.
!!! Warning
This overrides the compose file healthcheck
It is not intended to be used to store an entire project in configmaps.
It is intended to be used to store configuration files that are not managed
by the application, like nginx configuration files. Keep in mind that your
project sources should be stored in an application image or in a storage.
**Example:**
```yaml
volumes:
- ./conf.d:/etc/nginx/conf.d
labels:
katenary.v3/configmap-files: |-
- ./conf.d
```
### katenary.v3/cronjob
Create a cronjob from the service.
**Type**: `object`
This adds a cronjob to the chart.
The label value is a YAML object with the following attributes:
- command: the command to be executed
- schedule: the cron schedule (cron format or @every where "every" is a
duration like 1h30m, daily, hourly...)
- rbac: false (optional), if true, it will create a role, a rolebinding and
a serviceaccount to allow your cronjob to connect to the Kubernetes API
**Example:**
```yaml
labels:
katenary.v3/cronjob: |-
command: echo "hello world"
schedule: "* */1 * * *" # or @hourly for example
```
### katenary.v3/dependencies
Add Helm dependencies to the service.
**Type**: `list of objects`
Set the service to actually be a Helm dependency. This means that the
service will not be exported as template. The dependencies are added to
the Chart.yaml file and the values are added to the values.yaml file.
It's a list of objects with the following attributes:
- name: the name of the dependency
- repository: the repository of the dependency
- alias: the name of the dependency in values.yaml (optional)
- values: the values to be set in values.yaml (optional)
!!! Info
The hostname is set to "localhost" by convention, but Katenary will ignore the hostname in tcp and http tests because it will create a LivenessProbe.
Katenary doesn't update the helm dependencies by default.
Some example of usage:
Use `--helm-update` (or `-u`) flag to update the dependencies.
example: <code>katenary convert -u</code>
By setting an alias, it is possible to change the name of the dependency
in values.yaml.
**Example:**
```yaml
services:
mariadb:
image: mariadb
labels:
katenary.io/healthcheck: tcp://localhost:3306
katenary.v3/dependencies: |-
- name: mariadb
repository: oci://registry-1.docker.io/bitnamicharts
webapp:
image: nginx
labels:
katenary.io/healthcheck: http://localhost:80
## optional, it changes the name of the section in values.yaml
# alias: mydatabase
example:
image: yourimage
labels:
katenary.io/healthcheck: "test -f /opt/installed"
## optional, it adds the values to values.yaml
values:
auth:
database: mydatabasename
username: myuser
password: the secret password
```
## crontabs
### katenary.v3/description
Crontabs label proposes to create a complete CronTab object with needed RBAC to make it possible to run command inside the pod(s) with `kubectl`. Katenary will make the job for you. You only need to provide the command(s) to call.
Description of the service
It's a YAML array in multiline label.
**Type**: `string`
This replaces the default comment in the values.yaml file with the given description.
It is useful to document the service and configuration.
The value can be written as multiline documentation.
**Example:**
```yaml
services:
mariadb:
image: mariadb
labels:
katenary.io/crontabs: |
- command: mysqldump -B myapp -uroot -p$${MYSQL_ROOT_PASSWORD} > dump.sql
schedule: "@every 1h"
```
The object is:
```
command: Command to run
schedule: the cron form schedule string
allPods: boolean (default false) to activate the cront on each pod
image: image name to use (default is bitnami/kubectl)
with corresponding tag to your kubernetes version
katenary.v3/description: |-
This is a description of the service.
It can be multiline.
```
## empty-dirs
### katenary.v3/env-from
You sometime don't need to create a PersistentVolumeClaim. For example when a volume in your compose file is actually made to share the data between 2 or more containers.
Add environment variables from another service.
In this case, an "emptyDir" volume is appreciated.
**Type**: `list of strings`
It adds environment variables from another service to the current service.
**Example:**
```yaml
services:
webapp:
image: nginx
volumes:
- websource:/var/www/html
labels:
# sources is actually an empty directory on the node
katenary.io/empty-dirs: websource
service1:
image: nginx:1.19
environment:
FOO: bar
php:
image: php:7-fpm
volumes:
- sources:/var/www/html
service2:
image: php:7.4-fpm
labels:
# in the same pod than webapp
katenary.io/same-pod: webapp
# see the corresponding section, get the volume
# fro webapp
katenary.io/volume-from: |
sources:
webapp: websource
# get the configMap from service1 where FOO is
# defined inside this service too
katenary.v3/env-from: |-
- service1
```
## volume-from
### katenary.v3/health-check
We see this in the [empty-dir](#empty-dir) section, this label defines that the corresponding volume should be shared in this pod.
Health check to be added to the deployment.
**Type**: `object`
Health check to be added to the deployment.
**Example:**
```yaml
services:
webapp:
image: nginx
volumes:
- datasource:/var/www/html
app:
image: php
volumes:
- data:/opt/data
labels:
katenary.io/volume-from: |
# data in this container...
data:
# ... correspond to "datasource" in "webapp" container
webapp: datasource
katenary.v3/health-check: |-
httpGet:
path: /health
port: 8080
```
This implies that the declared volume in "webapp" will be mounted to "app" pods.
### katenary.v3/ignore
Ignore the service
**Type**: `bool`
Ignore a service so it is not exported in the helm chart.
**Example:**
```yaml
labels:
katenary.v3/ignore: "true"
```
### katenary.v3/ingress
Ingress rules to be added to the service.
**Type**: `object`
Declare an ingress rule for the service. The port should be exposed or
declared with `katenary.v3/ports`.
**Example:**
```yaml
labels:
katenary.v3/ingress: |-
port: 80
hostname: mywebsite.com (optional)
```
### katenary.v3/main-app
Mark the service as the main app.
**Type**: `bool`
This makes the service the main application. Its image tag is
considered to be the Chart appVersion and the default value of the
Pod container image attribute.
!!! Warning
This is possible with Kubernetes volumes restrictions. So, it works in these cases:
This label cannot be repeated in other services. If this label is
set to true in more than one service, Katenary will return an error.
- if the volume class is Read Write Many
- or if you mount the volume in the same pod (so in the same node)
- and/or the volume is an emptyDir
## same-pod
It's sometimes important and/or necessary to declare that 2 services are in the same pod. For example, using PHP-FPM and NGinx. In this case, you can declare that both services are in the same pod.
You must declare this label only on "supplementary" services and always use the same master service for the entire pod declaration.
**Example:**
```yaml
services:
web:
image: nginx
php:
image: php:8-fpm
ghost:
image: ghost:1.25.5
labels:
katenary.io/same-pod: web
# The chart is now named ghost, and the appVersion is 1.25.5.
# In Deployment, the image attribute is set to ghost:1.25.5 if
# you don't change the "tag" attribute in values.yaml
katenary.v3/main-app: true
```
The above example will create a `web` deployment, the PHP container is added in the `web` pod.
### katenary.v3/map-env
## configmap-volumes
Map env vars from the service to the deployment.
This label proposes to declare a file or directory where content is actually static and can be mounted as configMap volume.
**Type**: `object`
It's a comma separated label, you can declare several volumes.
Because you may need to change the variable for Kubernetes, this label
forces the value to another. It is also particularly helpful to use a template
value instead. For example, you could bind the value to a service name
with Helm attributes:
`{{ tpl .Release.Name . }}`.
For example, in `static/index.html`:
If you use `__APP__` in the value, it will be replaced by the Chart name.
```html
<html>
<body>Hello</body>
</html>
```
And a compose file (snippet):
**Example:**
```yaml
serivces:
web:
image: nginx
volumes:
- ./static:/usr/share/nginx/html:z
labels:
katenary.io/configmap-volumes: ./statics
```
What will make Katenary:
- create a configmap containing the "index.html" file as data
- declare the volume in the `web` deployment file
- mount the configmap in `/usr/share/nginx/html` directory of the container
## ingress
Declare which port to use to create an ingress. The hostname will be declared in `values.yaml` file.
```yaml
serivces:
web:
image: nginx
ports:
- 8080:80
labels:
katenary.io/ingress: 80
```
!!! Info
A port **must** be declared, in `ports` section or with `katenary.io/ports` label. This to force the creation of a `Service`.
## ports and container-ports
It's sometimes not mandatory to declare a port in compose file, or maybe you want to avoid to expose them in the compose file. But Katenary will sometimes need to know the ports to create service, for example to allow `depends_on` directive.
In this case, you can declare the ports in the corresponding label:
```yaml
serivces:
web:
image: nginx
labels:
katenary.io/ports: 80,443
```
This will leave Katenary creating the service to open these ports to others pods.
Sometimes, you need to have `containerPort` in pods but **avoid the service declaration**, so you can use this label:
```yaml
services:
php:
image: php:8-fpm
labels:
katenary.io/container-ports: 9000
```
That will only declare the container port in the pod, but not in the service.
!!! Info
It's very useful when you need to declare ports in conjonction with `same-pod`. Katenary would create a service with all the pods ports inside. The `container-ports` label will make the ports to be ignored in the service creation.
## mapenv
Environment variables are working great for your compose stack but you sometimes need to change them in Helm. This label allows you to remap the value for Helm.
For example, when you use an environment variable to point on another service.
```yaml
serivces:
php:
image: php
environment:
env:
DB_HOST: database
database:
image: mariadb
RUNNING: docker
OTHER: value
labels:
katenary.io/ports: 3306
katenary.v3/map-env: |-
RUNNING: kubernetes
DB_HOST: '{{ include "__APP__.fullname" . }}-database'
```
The above example will break when you'll start it in Kubernetes because the `database` service will not be named like this, it will be renamed to `{{ .Release.Name }}-database`. So, you can declare the rewrite:
### katenary.v3/ports
Ports to be added to the service.
**Type**: `list of uint32`
Only useful for services without exposed ports. It is mandatory if the
service is a dependency of another service.
**Example:**
```yaml
services:
labels:
katenary.v3/ports: |-
- 8080
- 8081
```
### katenary.v3/same-pod
Move the same-pod deployment to the target deployment.
**Type**: `string`
This will make the service be included in another service's pod. Some services
must work together in the same pod, like a sidecar or a proxy or nginx + php-fpm.
Note that volume and VolumeMount are copied from the source to the target
deployment.
**Example:**
```yaml
web:
image: nginx:1.19
php:
image: php
environment:
DB_HOST: database
image: php:7.4-fpm
labels:
katenary.io/mapenv: |
DB_HOST: "{{ .Release.Name }}"-database
database:
image: mariadb
labels:
katenary.io/ports: 3306
katenary.v3/same-pod: web
```
It's also useful when you want to change a variable value to another when you deploy on Kubernetes.
### katenary.v3/secrets
## secret-envfiles
Env vars to be set as secrets.
Katenary binds all "environemnt files" to config maps. But some of these files can be bound as sercrets.
**Type**: `list of string`
In this case, declare the files as is:
This label allows setting environment variables as secrets. The variable
is removed from the environment and added to a secret object.
The variable can also be added to `katenary.v3/values`,
so the secret value can be configured in values.yaml.
**Example:**
```yaml
services:
app:
image: #...
env_file:
- ./env/whatever
- ./env/sensitives
env:
PASSWORD: a very secret password
NOT_A_SECRET: a public value
labels:
katenary.io/secret-envfiles: ./env/sensitives
katenary.v3/secrets: |-
- PASSWORD
```
## secret-vars
### katenary.v3/values
If you have some environemnt variables to declare as secret, you can list them in the `secret-vars` label.
Environment variables to be added to the values.yaml
**Type**: `list of string or map`
By default, all environment variables in the "env" and environment
files are added to configmaps with the static values set. This label
allows adding environment variables to the values.yaml file.
Note that the value inside the configmap is `{{ tpl varname . }}`, so
you can set the value to a template that will be rendered with the
values.yaml file.
The value can be set with documentation. This may help to understand
the purpose of the variable.
**Example:**
```yaml
services:
database:
image: mariadb
environemnt:
MYSQL_PASSWORD: foobar
MYSQL_ROOT_PASSWORD: longpasswordhere
MYSQL_USER: john
MYSQL_DATABASE: appdb
env:
FOO: bar
DB_NAME: mydb
TO_CONFIGURE: something that can be changed in values.yaml
A_COMPLEX_VALUE: example
labels:
katenary.io/secret-vars: MYSQL_ROOT_PASSWORD,MYSQL_PASSWORD
katenary.v3/values: |-
# simple values, set as is in values.yaml
- TO_CONFIGURE
# complex values, set as a template in values.yaml with a documentation
- A_COMPLEX_VALUE: |-
This is the documentation for the variable to
configure in values.yaml.
It can be, of course, a multiline text.
```
## ignore
Simply ignore the service to not be exported in the Helm Chart.
```yaml
serivces:
# this service is able to answer HTTP
# on port 5000
webapp:
image: myapp
labels:
# declare the port
katenary.io/ports: 5000
# the ingress controller is a web proxy, so...
katenary.io/ingress: 5000
# with local Docker, I want to access my webapp
# with "myapp.locahost" so I use a nice proxy on
# port 80
proxy:
image: quay.io/pathwae/proxy
ports:
- 80:80
environemnt:
CONFIG: |
myapp.localhost: webapp:5000
labels:
# I don't need it in Helm, it's only
# for local test!
katenary.io/ignore: true
```
<!-- STOP_DETAILED_DOC : do not remove this tag !-->


@@ -0,0 +1,8 @@
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# katenary
``` go
import "katenary/cmd/katenary"
```


@@ -0,0 +1,893 @@
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# generator
``` go
import "katenary/generator"
```
The generator package generates kubernetes objects from a compose file
and transforms them into a helm chart.
The generator package is the core of katenary. It is responsible for
generating kubernetes objects from a compose file and transforming them
into a helm chart. Conversion manipulates the Yaml representation of
kubernetes objects to add conditions, labels, annotations, etc. to the
objects. It also creates the values to be set in the values.yaml file.
The generator.Convert() function creates a HelmChart object and calls the "Generate()"
method to convert a compose file to a helm chart. It saves the helm
chart in the given directory.
If you want to change or override the write behavior, you can use the
HelmChart.Generate() function and implement your own write function.
This function returns the helm chart object containing all kubernetes
objects and helm chart information. It does not write the helm chart to
the disk.
TODO: Manage cronjob + rbac. TODO: create note.txt. TODO: manage emptyDirs.
## Constants
``` go
const KATENARY_PREFIX = "katenary.v3/"
```
## Variables
``` go
var (
// Standard annotationss
Annotations = map[string]string{
KATENARY_PREFIX + "version": Version,
}
)
```
Version is the version of katenary. It is set at compile time.
``` go
var Version = "master" // changed at compile time
```
## func Convert
``` go
func Convert(config ConvertOptions, dockerComposeFile ...string)
```
Convert a compose (docker, podman…) project to a helm chart. It calls
Generate() to generate the chart and then writes it to the disk.
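For illustration, a minimal call of this function could look like the following sketch; the option values and the compose file name are examples, not defaults:

``` go
package main

import "katenary/generator"

func main() {
	appVersion := "1.2.3" // illustrative; leave AppVersion nil to use the default
	generator.Convert(generator.ConvertOptions{
		OutputDir:    "./chart",
		ChartVersion: "0.1.0",
		AppVersion:   &appVersion,
		Force:        false,
		HelmUpdate:   false,
	}, "compose.yml")
}
```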
## func GetLabelHelp
``` go
func GetLabelHelp(asMarkdown bool) string
```
Generate the help for the labels.
## func GetLabelHelpFor
``` go
func GetLabelHelpFor(labelname string, asMarkdown bool) string
```
GetLabelHelpFor returns the help for a specific label.
## func GetLabelNames
``` go
func GetLabelNames() []string
```
GetLabelNames returns a sorted list of all katenary label names.
## func GetLabels
``` go
func GetLabels(serviceName, appName string) map[string]string
```
## func GetMatchLabels
``` go
func GetMatchLabels(serviceName, appName string) map[string]string
```
## func Helper
``` go
func Helper(name string) string
```
Helper returns the \_helpers.tpl file for a chart.
## func NewCronJob
``` go
func NewCronJob(service types.ServiceConfig, chart *HelmChart, appName string) (*CronJob, *RBAC)
```
NewCronJob creates a new CronJob from a compose service. The appName is
the name of the application taken from the project name.
## type ChartTemplate
ChartTemplate is a template of a chart. It contains the content of the
template and the name of the service. This is used internally to
generate the templates.
TODO: maybe we can set it private.
``` go
type ChartTemplate struct {
Content []byte
Servicename string
}
```
## type ConfigMap
ConfigMap is a kubernetes ConfigMap. Implements the DataMap interface.
``` go
type ConfigMap struct {
*corev1.ConfigMap
// contains filtered or unexported fields
}
```
### func NewConfigMap
``` go
func NewConfigMap(service types.ServiceConfig, appName string) *ConfigMap
```
NewConfigMap creates a new ConfigMap from a compose service. The appName
is the name of the application taken from the project name. The
ConfigMap is filled by environment variables and labels “map-env”.
### func NewConfigMapFromFiles
``` go
func NewConfigMapFromFiles(service types.ServiceConfig, appName string, path string) *ConfigMap
```
NewConfigMapFromFiles creates a new ConfigMap from a compose service.
This path is the path to the file or directory. If the path is a
directory, all files in the directory are added to the ConfigMap. Each
subdirectory is ignored. Note that the Generate() function will create
the ConfigMaps for subdirectories.
### func (*ConfigMap) AddData
``` go
func (c *ConfigMap) AddData(key string, value string)
```
AddData adds a key value pair to the configmap. Append or overwrite the
value if the key already exists.
### func (*ConfigMap) AppendDir
``` go
func (c *ConfigMap) AppendDir(path string)
```
AppendDir adds files from the given path to the configmap. It is not
recursive; to add all files in a directory tree, you need to call this
function for each subdirectory.
### func (*ConfigMap) Filename
``` go
func (c *ConfigMap) Filename() string
```
Filename returns the filename of the configmap. If the configmap is used
for files, the filename contains the path.
### func (*ConfigMap) SetData
``` go
func (c *ConfigMap) SetData(data map[string]string)
```
SetData sets the data of the configmap. It replaces the entire data.
### func (*ConfigMap) Yaml
``` go
func (c *ConfigMap) Yaml() ([]byte, error)
```
Yaml returns the yaml representation of the configmap
## type ConvertOptions
ConvertOptions are the options to convert a compose project to a helm
chart.
``` go
type ConvertOptions struct {
Force bool // Force the chart directory deletion if it already exists.
OutputDir string // The output directory of the chart.
Profiles []string // Profile to use for the conversion.
HelmUpdate bool // If true, the "helm dep update" command will be run after the chart generation.
AppVersion *string // Set the chart "appVersion" field. If nil, the version will be set to 0.1.0.
ChartVersion string // Set the chart "version" field.
}
```
## type CronJob
CronJob is a kubernetes CronJob.
``` go
type CronJob struct {
*batchv1.CronJob
// contains filtered or unexported fields
}
```
### func (*CronJob) Filename
``` go
func (c *CronJob) Filename() string
```
Filename returns the filename of the cronjob.
Implements the Yaml interface.
### func (*CronJob) Yaml
``` go
func (c *CronJob) Yaml() ([]byte, error)
```
Yaml returns the yaml representation of the cronjob.
Implements the Yaml interface.
## type CronJobValue
CronJobValue is a cronjob configuration that will be saved in
values.yaml.
``` go
type CronJobValue struct {
Repository *RepositoryValue `yaml:"repository,omitempty"`
Environment map[string]any `yaml:"environment,omitempty"`
ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
Schedule string `yaml:"schedule"`
}
```
## type DataMap
DataMap is a kubernetes ConfigMap or Secret. It can be used to add data
to the ConfigMap or Secret.
``` go
type DataMap interface {
SetData(map[string]string)
AddData(string, string)
}
```
### func NewFileMap
``` go
func NewFileMap(service types.ServiceConfig, appName string, kind string) DataMap
```
NewFileMap creates a new DataMap from a compose service. The appName is
the name of the application taken from the project name.
## type Dependency
Dependency is a dependency of a chart to other charts.
``` go
type Dependency struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Repository string `yaml:"repository"`
Alias string `yaml:"alias,omitempty"`
Values map[string]any `yaml:"-"` // do not export to Chart.yaml
}
```
## type Deployment
Deployment is a kubernetes Deployment.
``` go
type Deployment struct {
*appsv1.Deployment `yaml:",inline"`
// contains filtered or unexported fields
}
```
### func NewDeployment
``` go
func NewDeployment(service types.ServiceConfig, chart *HelmChart) *Deployment
```
NewDeployment creates a new Deployment from a compose service. The
appName is the name of the application taken from the project name. It
also creates the Values map that will be used to create the values.yaml
file.
### func (*Deployment) AddContainer
``` go
func (d *Deployment) AddContainer(service types.ServiceConfig)
```
AddContainer adds a container to the deployment.
### func (*Deployment) AddHealthCheck
``` go
func (d *Deployment) AddHealthCheck(service types.ServiceConfig, container *corev1.Container)
```
### func (*Deployment) AddIngress
``` go
func (d *Deployment) AddIngress(service types.ServiceConfig, appName string) *Ingress
```
AddIngress adds an ingress to the deployment. It creates the ingress
object.
### func (*Deployment) AddVolumes
``` go
func (d *Deployment) AddVolumes(service types.ServiceConfig, appName string)
```
AddVolumes adds a volume to the deployment. It does not create the PVC,
it only adds the volumes to the deployment. If the volume is a bind
volume it will warn the user that it is not supported yet.
### func (*Deployment) BindFrom
``` go
func (d *Deployment) BindFrom(service types.ServiceConfig, binded *Deployment)
```
### func (*Deployment) DependsOn
``` go
func (d *Deployment) DependsOn(to *Deployment) error
```
DependsOn adds an initContainer to the deployment that will wait for the
service to be up.
### func (*Deployment) Filename
``` go
func (d *Deployment) Filename() string
```
### func (*Deployment) SetEnvFrom
``` go
func (d *Deployment) SetEnvFrom(service types.ServiceConfig, appName string)
```
SetEnvFrom sets the environment variables to a configmap. The configmap
is created.
### func (*Deployment) Yaml
``` go
func (d *Deployment) Yaml() ([]byte, error)
```
Yaml returns the yaml representation of the deployment.
## type FileMapUsage
FileMapUsage is the usage of the filemap.
``` go
type FileMapUsage uint8
```
FileMapUsage constants.
``` go
const (
FileMapUsageConfigMap FileMapUsage = iota // pure configmap for key:values.
FileMapUsageFiles // files in a configmap.
)
```
## type HelmChart
HelmChart is a Helm Chart representation. It contains all the templates,
values, versions, helpers…
``` go
type HelmChart struct {
Name string `yaml:"name"`
ApiVersion string `yaml:"apiVersion"`
Version string `yaml:"version"`
AppVersion string `yaml:"appVersion"`
Description string `yaml:"description"`
Dependencies []Dependency `yaml:"dependencies,omitempty"`
Templates map[string]*ChartTemplate `yaml:"-"` // do not export to yaml
Helper string `yaml:"-"` // do not export to yaml
Values map[string]any `yaml:"-"` // do not export to yaml
VolumeMounts map[string]any `yaml:"-"` // do not export to yaml
// contains filtered or unexported fields
}
```
### func Generate
``` go
func Generate(project *types.Project) (*HelmChart, error)
```
Generate a chart from a compose project. This does not write files to
disk, it only creates the HelmChart object.
The Generate function will create the HelmChart object this way (a short
usage sketch follows the list):
1. Detect the service port name or leave the port number if not found.
2. Create a deployment for each service that is not ignored.
3. Create a service and ingresses for each service that has ports and/or
declared ingresses.
4. Create PVC or ConfigMap volumes for each volume.
5. Create init containers for each service that depends on other
services.
6. Create the chart dependencies.
7. Create a ConfigMap and Secrets from the environment variables.
8. Merge the same-pod services.
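A minimal usage sketch, assuming the parser package documented later in this file is used to load the project; the compose file name and error handling are placeholders.
``` go
package main

import (
    "fmt"
    "log"

    "katenary/generator"
    "katenary/parser"
)

func main() {
    // Load the compose project first (see the parser package).
    project, err := parser.Parse(nil, "compose.yaml") // placeholder file name
    if err != nil {
        log.Fatal(err)
    }

    // Build the chart in memory; nothing is written to disk here.
    chart, err := generator.Generate(project)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("generated templates:", len(chart.Templates))
}
```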
### func NewChart
``` go
func NewChart(name string) *HelmChart
```
NewChart creates a new empty chart with the given name.
## type Help
Help is the documentation of a label.
``` go
type Help struct {
Short string `yaml:"short"`
Long string `yaml:"long"`
Example string `yaml:"example"`
Type string `yaml:"type"`
}
```
## type Ingress
``` go
type Ingress struct {
*networkv1.Ingress
// contains filtered or unexported fields
}
```
### func NewIngress
``` go
func NewIngress(service types.ServiceConfig, Chart *HelmChart) *Ingress
```
NewIngress creates a new Ingress from a compose service.
### func (*Ingress) Filename
``` go
func (ingress *Ingress) Filename() string
```
### func (*Ingress) Yaml
``` go
func (ingress *Ingress) Yaml() ([]byte, error)
```
## type IngressValue
IngressValue is an ingress configuration that will be saved in
values.yaml.
``` go
type IngressValue struct {
Enabled bool `yaml:"enabled"`
Host string `yaml:"host"`
Path string `yaml:"path"`
Class string `yaml:"class"`
Annotations map[string]string `yaml:"annotations"`
}
```
## type Label
Label is a katenary label to find in compose files.
``` go
type Label = string
```
Known labels.
``` go
const (
LABEL_MAIN_APP Label = KATENARY_PREFIX + "main-app"
LABEL_VALUES Label = KATENARY_PREFIX + "values"
LABEL_SECRETS Label = KATENARY_PREFIX + "secrets"
LABEL_PORTS Label = KATENARY_PREFIX + "ports"
LABEL_INGRESS Label = KATENARY_PREFIX + "ingress"
LABEL_MAP_ENV Label = KATENARY_PREFIX + "map-env"
LABEL_HEALTHCHECK Label = KATENARY_PREFIX + "health-check"
LABEL_SAME_POD Label = KATENARY_PREFIX + "same-pod"
LABEL_DESCRIPTION Label = KATENARY_PREFIX + "description"
LABEL_IGNORE Label = KATENARY_PREFIX + "ignore"
LABEL_DEPENDENCIES Label = KATENARY_PREFIX + "dependencies"
LABEL_CM_FILES Label = KATENARY_PREFIX + "configmap-files"
LABEL_CRONJOB Label = KATENARY_PREFIX + "cronjob"
LABEL_ENV_FROM Label = KATENARY_PREFIX + "env-from"
)
```
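As a small, hedged sketch, these constants can be used to look up katenary labels on a compose service; the service and the label value below are hypothetical.
``` go
package main

import (
    "fmt"

    "katenary/generator"

    "github.com/compose-spec/compose-go/types"
)

func main() {
    // Hypothetical service declaring a katenary "ports" label.
    svc := types.ServiceConfig{
        Name:   "database",
        Labels: map[string]string{generator.LABEL_PORTS: "3306"},
    }

    if ports, ok := svc.Labels[generator.LABEL_PORTS]; ok {
        fmt.Println("declared ports:", ports)
    }
}
```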
## type LabelType
LabelType identifies the type of label to generate in objects. TODO: is
this still needed?
``` go
type LabelType uint8
```
``` go
const (
DeploymentLabel LabelType = iota
ServiceLabel
)
```
## type PersistenceValue
PersistenceValue is a persistence configuration that will be saved in
values.yaml.
``` go
type PersistenceValue struct {
Enabled bool `yaml:"enabled"`
StorageClass string `yaml:"storageClass"`
Size string `yaml:"size"`
AccessMode []string `yaml:"accessMode"`
}
```
## type RBAC
RBAC is a kubernetes RBAC containing a role, a rolebinding and an
associated serviceaccount.
``` go
type RBAC struct {
RoleBinding *RoleBinding
Role *Role
ServiceAccount *ServiceAccount
}
```
### func NewRBAC
``` go
func NewRBAC(service types.ServiceConfig, appName string) *RBAC
```
NewRBAC creates a new RBAC from a compose service. The appName is the
name of the application taken from the project name.
## type RepositoryValue
RepositoryValue is a docker repository image and tag that will be saved
in values.yaml.
``` go
type RepositoryValue struct {
Image string `yaml:"image"`
Tag string `yaml:"tag"`
}
```
## type Role
Role is a kubernetes Role.
``` go
type Role struct {
*rbacv1.Role
// contains filtered or unexported fields
}
```
### func (*Role) Filename
``` go
func (r *Role) Filename() string
```
### func (*Role) Yaml
``` go
func (r *Role) Yaml() ([]byte, error)
```
## type RoleBinding
RoleBinding is a kubernetes RoleBinding.
``` go
type RoleBinding struct {
*rbacv1.RoleBinding
// contains filtered or unexported fields
}
```
### func (*RoleBinding) Filename
``` go
func (r *RoleBinding) Filename() string
```
### func (*RoleBinding) Yaml
``` go
func (r *RoleBinding) Yaml() ([]byte, error)
```
## type Secret
Secret is a kubernetes Secret.
Implements the DataMap interface.
``` go
type Secret struct {
*corev1.Secret
// contains filtered or unexported fields
}
```
### func NewSecret
``` go
func NewSecret(service types.ServiceConfig, appName string) *Secret
```
NewSecret creates a new Secret from a compose service.
### func (*Secret) AddData
``` go
func (s *Secret) AddData(key string, value string)
```
AddData adds a key value pair to the secret.
### func (*Secret) Filename
``` go
func (s *Secret) Filename() string
```
Filename returns the filename of the secret.
### func (*Secret) SetData
``` go
func (s *Secret) SetData(data map[string]string)
```
SetData sets the data of the secret.
### func (*Secret) Yaml
``` go
func (s *Secret) Yaml() ([]byte, error)
```
Yaml returns the yaml representation of the secret.
## type Service
Service is a kubernetes Service.
``` go
type Service struct {
*v1.Service `yaml:",inline"`
// contains filtered or unexported fields
}
```
### func NewService
``` go
func NewService(service types.ServiceConfig, appName string) *Service
```
NewService creates a new Service from a compose service.
### func (*Service) AddPort
``` go
func (s *Service) AddPort(port types.ServicePortConfig, serviceName ...string)
```
AddPort adds a port to the service.
### func (*Service) Filename
``` go
func (s *Service) Filename() string
```
Filename returns the filename of the service.
### func (*Service) Yaml
``` go
func (s *Service) Yaml() ([]byte, error)
```
Yaml returns the yaml representation of the service.
## type ServiceAccount
ServiceAccount is a kubernetes ServiceAccount.
``` go
type ServiceAccount struct {
*corev1.ServiceAccount
// contains filtered or unexported fields
}
```
### func (*ServiceAccount) Filename
``` go
func (r *ServiceAccount) Filename() string
```
### func (*ServiceAccount) Yaml
``` go
func (r *ServiceAccount) Yaml() ([]byte, error)
```
## type Value
Value will be saved in values.yaml. It contains the configuration for all
deployments and services. The content will be like:
name_of_component:
repository:
image: image_name
tag: image_tag
persistence:
enabled: true
storageClass: storage_class_name
ingress:
enabled: true
host: host_name
path: path_name
environment:
ENV_VAR_1: value_1
ENV_VAR_2: value_2
``` go
type Value struct {
Repository *RepositoryValue `yaml:"repository,omitempty"`
Persistence map[string]*PersistenceValue `yaml:"persistence,omitempty"`
Ingress *IngressValue `yaml:"ingress,omitempty"`
ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
Environment map[string]any `yaml:"environment,omitempty"`
Replicas *uint32 `yaml:"replicas,omitempty"`
CronJob *CronJobValue `yaml:"cronjob,omitempty"`
}
```
### func NewValue
``` go
func NewValue(service types.ServiceConfig, main ...bool) *Value
```
NewValue creates a new Value from a compose service. The value contains
the necessary information to deploy the service (image, tag, replicas,
etc.).
If \`main\` is true, the tag will be empty because it will be set in the
helm chart appVersion.
### func (*Value) AddIngress
``` go
func (v *Value) AddIngress(host, path string)
```
### func (*Value) AddPersistence
``` go
func (v *Value) AddPersistence(volumeName string)
```
AddPersistence adds persistence configuration to the Value.
## type VolumeClaim
VolumeClaim is a kubernetes VolumeClaim. This is a
PersistentVolumeClaim.
``` go
type VolumeClaim struct {
*v1.PersistentVolumeClaim
// contains filtered or unexported fields
}
```
### func NewVolumeClaim
``` go
func NewVolumeClaim(service types.ServiceConfig, volumeName, appName string) *VolumeClaim
```
NewVolumeClaim creates a new VolumeClaim from a compose service.
### func (*VolumeClaim) Filename
``` go
func (v *VolumeClaim) Filename() string
```
Filename returns the suggested filename for a VolumeClaim.
### func (*VolumeClaim) Yaml
``` go
func (v *VolumeClaim) Yaml() ([]byte, error)
```
Yaml marshals a VolumeClaim into yaml.
## type Yaml
Yaml is a kubernetes object that can be converted to yaml.
``` go
type Yaml interface {
Yaml() ([]byte, error)
Filename() string
}
```
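Every generated object above (Deployment, Service, Secret, ConfigMap…) satisfies this interface, so callers can write them uniformly. The helper below is a sketch that is not part of the package; the directory layout and file modes are assumptions.
``` go
package example

import (
    "os"
    "path/filepath"

    "katenary/generator"
)

// writeObject is a hypothetical helper: it serializes any object implementing
// the Yaml interface and writes it under the given templates directory.
func writeObject(templatesDir string, obj generator.Yaml) error {
    content, err := obj.Yaml()
    if err != nil {
        return err
    }
    path := filepath.Join(templatesDir, obj.Filename())
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        return err
    }
    return os.WriteFile(path, content, 0o644)
}
```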
Generated by [gomarkdoc](https://github.com/princjef/gomarkdoc)

View File

@@ -0,0 +1,28 @@
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# extrafiles
``` go
import "katenary/generator/extrafiles"
```
extrafiles package provides functions to generate the chart files that
are not Kubernetes objects, like README.md and NOTES.txt…
## func NotesFile
``` go
func NotesFile() string
```
NotesFile returns the content of the NOTES.txt file.
## func ReadMeFile
``` go
func ReadMeFile(charname, description string, values map[string]any) string
```
ReadMeFile returns the content of the README.md file.
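A hedged sketch of calling both helpers; the chart name, description and values map are placeholders and do not come from the repository.
``` go
package main

import (
    "fmt"

    "katenary/generator/extrafiles"
)

func main() {
    // Placeholder values map, shaped like the chart values.
    values := map[string]any{
        "webapp": map[string]any{"replicas": 1},
    }

    readme := extrafiles.ReadMeFile("myapp", "A demo chart", values)
    notes := extrafiles.NotesFile()

    fmt.Println(readme)
    fmt.Println(notes)
}
```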
Generated by [gomarkdoc](https://github.com/princjef/gomarkdoc)

View File

@@ -0,0 +1,20 @@
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# parser
``` go
import "katenary/parser"
```
Parser package is a wrapper around compose-go to parse compose files.
## func Parse
``` go
func Parse(profiles []string, dockerComposeFile ...string) (*types.Project, error)
```
Parse compose files and return a project. The project is parsed with
dotenv, osenv and profiles.
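A minimal, hypothetical call to Parse; the profile name and compose file path are placeholders.
``` go
package main

import (
    "fmt"
    "log"

    "katenary/parser"
)

func main() {
    // Parse one compose file with a single profile enabled (both are placeholders).
    project, err := parser.Parse([]string{"production"}, "docker-compose.yaml")
    if err != nil {
        log.Fatal(err)
    }
    for _, svc := range project.Services {
        fmt.Println("service:", svc.Name)
    }
}
```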
Generated by [gomarkdoc](https://github.com/princjef/gomarkdoc)

View File

@@ -0,0 +1,55 @@
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# update
``` go
import "katenary/update"
```
Update package is used to check if a new version of katenary is
available.
## Variables
``` go
var Version = "master" // reset by cmd/main.go
```
## func DownloadFile
``` go
func DownloadFile(url, exe string) error
```
DownloadFile will download a URL to a local file. It also ensures that
the file is executable.
## func DownloadLatestVersion
``` go
func DownloadLatestVersion(assets []Asset) error
```
DownloadLatestVersion will download the latest version of katenary.
## type Asset
Asset is a github asset from release url.
``` go
type Asset struct {
Name string `json:"name"`
URL string `json:"browser_download_url"`
}
```
### func CheckLatestVersion
``` go
func CheckLatestVersion() (string, []Asset, error)
```
CheckLatestVersion checks the latest katenary version from the releases
and proposes to download it.
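A sketch of how these functions could be chained for a self-update flow; this is an assumption about usage, not necessarily how the katenary CLI wires them.
``` go
package main

import (
    "fmt"
    "log"

    "katenary/update"
)

func main() {
    version, assets, err := update.CheckLatestVersion()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("latest version:", version)

    // Download the asset matching the current platform and replace the binary.
    if err := update.DownloadLatestVersion(assets); err != nil {
        log.Fatal(err)
    }
}
```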
Generated by [gomarkdoc](https://github.com/princjef/gomarkdoc)

187
doc/docs/packages/utils.md Normal file
View File

@@ -0,0 +1,187 @@
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# utils
``` go
import "katenary/utils"
```
Utils package provides some utility functions used in katenary. It
defines some constants and functions used in the whole project.
## Constants
Icons used in katenary.
``` go
const (
IconSuccess Icon = "✅"
IconFailure = "❌"
IconWarning = "⚠️'"
IconNote = "📝"
IconWorld = "🌐"
IconPlug = "🔌"
IconPackage = "📦"
IconCabinet = "🗄️"
IconInfo = "❕"
IconSecret = "🔒"
IconConfig = "🔧"
IconDependency = "🔗"
)
```
## func CountStartingSpaces
``` go
func CountStartingSpaces(line string) int
```
CountStartingSpaces counts the number of spaces at the beginning of a
string.
## func GetContainerByName
``` go
func GetContainerByName(name string, containers []corev1.Container) (*corev1.Container, int)
```
GetContainerByName returns a container by name and its index in the
array. It returns nil, -1 if not found.
## func GetKind
``` go
func GetKind(path string) (kind string)
```
GetKind returns the kind of the resource from the file path.
## func GetServiceNameByPort
``` go
func GetServiceNameByPort(port int) string
```
GetServiceNameByPort returns the service name for a port. If the service
name is not found, it returns an empty string.
## func GetValuesFromLabel
``` go
func GetValuesFromLabel(service types.ServiceConfig, LabelValues string) map[string]*EnvConfig
```
GetValuesFromLabel returns a map of values from a label.
## func HashComposefiles
``` go
func HashComposefiles(files []string) (string, error)
```
HashComposefiles returns a hash of the compose files.
## func Int32Ptr
``` go
func Int32Ptr(i int32) *int32
```
Int32Ptr returns a pointer to an int32.
## func MapKeys
``` go
func MapKeys(m map[string]interface{}) []string
```
## func PathToName
``` go
func PathToName(path string) string
```
PathToName converts a path to a kubernetes compliant name.
## func StrPtr
``` go
func StrPtr(s string) *string
```
StrPtr returns a pointer to a string.
## func TplName
``` go
func TplName(serviceName, appname string, suffix ...string) string
```
TplName returns the name of the kubernetes resource as a template
string. It is used in the templates and defined in the \_helpers.tpl file.
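A hedged example of the call shape; the exact template string returned depends on the generated `_helpers.tpl`, so the output is not shown here.
``` go
package main

import (
    "fmt"

    "katenary/utils"
)

func main() {
    // Template name for the "database" service of the "myapp" chart,
    // with and without an extra suffix.
    name := utils.TplName("database", "myapp")
    withSuffix := utils.TplName("database", "myapp", "config")
    fmt.Println(name)
    fmt.Println(withSuffix)
}
```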
## func TplValue
``` go
func TplValue(serviceName, variable string, pipes ...string) string
```
TplValue returns a Helm template string that references the given service
variable from values.yaml, optionally piped through the given template
functions.
## func Warn
``` go
func Warn(msg ...interface{})
```
Warn prints a warning message.
## func WordWrap
``` go
func WordWrap(text string, lineWidth int) string
```
WordWrap wraps a string to a given line width. Warning: it may break the
string. You need to check the result.
## func Wrap
``` go
func Wrap(src, above, below string) string
```
Wrap wraps a string with a string above and below. It will respect the
indentation of the src string.
## func WrapBytes
``` go
func WrapBytes(src, above, below []byte) []byte
```
WrapBytes wraps a byte array with a byte array above and below. It will
respect the indentation of the src string.
## type EnvConfig
EnvConfig is a struct to hold the description of an environment
variable.
``` go
type EnvConfig struct {
Description string
Service types.ServiceConfig
}
```
## type Icon
Icon is a unicode icon.
``` go
type Icon string
```
Generated by [gomarkdoc](https://github.com/princjef/gomarkdoc)

View File

@@ -27,7 +27,7 @@ button.md-clipboard:hover::after {
article a,
article a:visited {
color: var(--md-code-hl-number-color);
color: var(--md-code-hl-number-color) !important;
}
.md-center {
@@ -53,3 +53,15 @@ pre code.hljs {
background-color: var(--code-bg-color);
color: var(--code-fg-color);
}
table tbody code {
text-align: left;
white-space: nowrap;
font-size: 1em !important;
background-color: transparent !important;
color: var(--md-code-hl-special-color) !important;
}
h3[id*="katenaryio"] {
color: var(--md-code-hl-special-color);
}

View File

@@ -18,8 +18,8 @@ markdown_extensions:
- admonition
- attr_list
- pymdownx.emoji:
emoji_generator: !!python/name:materialx.emoji.to_svg
emoji_index: !!python/name:materialx.emoji.twemoji
emoji_index: !!python/name:material.extensions.emoji.twemoji
emoji_generator: !!python/name:material.extensions.emoji.to_svg
- pymdownx.highlight:
anchor_linenums: true
use_pygments: false
@@ -28,7 +28,7 @@ extra_css:
- statics/main.css
extra_javascript:
- statics/addons.js
copyright: Copyright &copy; 2021 - 2022 - Katenary authors
copyright: Copyright &copy; 2021 - 2023 - Katenary authors
extra:
generator: false
social:
@@ -38,3 +38,12 @@ nav:
- "Home": index.md
- usage.md
- labels.md
- Behind the scene:
- coding.md
- dependencies.md
- Go Packages:
- packages/generator.md
- packages/parser.md
- packages/update.md
- packages/utils.md
- packages/generator/extrafiles.md

View File

@@ -1,4 +1,4 @@
mkdocs==1.3.0
mkdocs>=1.3.0
Jinja2>=2.10.2
MarkupSafe>=2.0
pymdown-extensions>=9.5

View File

@@ -1,10 +0,0 @@
# Basic example
This is a basic example of what Katenary can do with a standard docker-compose file.
In this example:
- `depends_on` yields an `initContainer` in the webapp deployment to wait for the database
- so we need to declare the listened port inside the `database` container, as we don't use it with docker-compose
- also, we needed to declare that `DB_HOST` is actually a service name, using the `mapenv` label
Take a look at the [chart/basic](chart/basic) directory to see what the `katenary convert` command has generated.

View File

@@ -1,8 +0,0 @@
# Create on 2022-02-17T10:27:30+01:00
# Katenary command line: katenary convert
apiVersion: v2
appVersion: 0.0.1
description: A helm chart for basic
name: basic
type: application
version: 0.1.0

View File

@@ -1,8 +0,0 @@
Congratulations,
Your application is now deployed. This may take a while to be up and responding.
{{ if .Values.webapp.ingress.enabled -}}
- webapp is accessible on : http://{{ .Values.webapp.ingress.host }}
{{- end }}

View File

@@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-database'
labels:
katenary.io/component: database
katenary.io/project: basic
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: b9f12bb7d1e97901c1d7680394209525763f6640
katenary.io/version: master-3619cc4
spec:
replicas: 1
selector:
matchLabels:
katenary.io/component: database
katenary.io/release: '{{ .Release.Name }}'
template:
metadata:
labels:
katenary.io/component: database
katenary.io/release: '{{ .Release.Name }}'
spec:
containers:
- name: database
image: '{{ .Values.database.image }}'
ports:
- name: database
containerPort: 3306
env:
- name: MARIADB_PASSWORD
value: foo
- name: MARIADB_DATABASE
value: myapp
- name: MARIADB_ROOT_PASSWORD
value: foobar
- name: MARIADB_USER
value: foo

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-database'
labels:
katenary.io/component: database
katenary.io/project: basic
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: b9f12bb7d1e97901c1d7680394209525763f6640
katenary.io/version: master-3619cc4
spec:
selector:
katenary.io/component: database
katenary.io/release: '{{ .Release.Name }}'
ports:
- protocol: TCP
port: 3306
targetPort: 3306

View File

@@ -1,48 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-webapp'
labels:
katenary.io/component: webapp
katenary.io/project: basic
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: b9f12bb7d1e97901c1d7680394209525763f6640
katenary.io/version: master-3619cc4
spec:
replicas: 1
selector:
matchLabels:
katenary.io/component: webapp
katenary.io/release: '{{ .Release.Name }}'
template:
metadata:
labels:
katenary.io/component: webapp
katenary.io/release: '{{ .Release.Name }}'
spec:
initContainers:
- name: check-database
image: busybox
command:
- sh
- -c
- |-
OK=0
echo "Checking database port"
while [ $OK != 1 ]; do
echo -n "."
nc -z {{ .Release.Name }}-database 3306 2>&1 >/dev/null && OK=1 || sleep 1
done
echo
echo "Done"
containers:
- name: webapp
image: '{{ .Values.webapp.image }}'
ports:
- name: webapp
containerPort: 80
env:
- name: DB_HOST
value: '{{ .Release.Name }}-database'

View File

@@ -1,34 +0,0 @@
{{- if .Values.webapp.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: '{{ .Release.Name }}-webapp'
labels:
katenary.io/component: webapp
katenary.io/project: basic
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: b9f12bb7d1e97901c1d7680394209525763f6640
katenary.io/version: master-3619cc4
spec:
{{- if and .Values.webapp.ingress.class (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: '{{ .Values.webapp.ingress.class }}'
{{- end }}
rules:
- host: '{{ .Values.webapp.ingress.host }}'
http:
paths:
- path: /
pathType: Prefix
backend:
{{- if semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: '{{ .Release.Name }}-webapp'
port:
number: 80
{{- else }}
serviceName: '{{ .Release.Name }}-webapp'
servicePort: 80
{{- end }}
{{- end -}}

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-webapp'
labels:
katenary.io/component: webapp
katenary.io/project: basic
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: b9f12bb7d1e97901c1d7680394209525763f6640
katenary.io/version: master-3619cc4
spec:
selector:
katenary.io/component: webapp
katenary.io/release: '{{ .Release.Name }}'
ports:
- protocol: TCP
port: 80
targetPort: 80

View File

@@ -1,8 +0,0 @@
database:
image: mariadb:10
webapp:
image: php:7-apache
ingress:
class: nginx
enabled: false
host: webapp.basic.tld

View File

@@ -1,31 +0,0 @@
version: "3"
# this example is absolutely not working, it's an example to see how it is converted
# by Katenary
services:
webapp:
image: php:7-apache
environment:
DB_HOST: database
ports:
- "8080:80"
labels:
# expose an ingress
katenary.io/ingress: 80
# DB_HOST is actually a service name
katenary.io/mapenv: |
DB_HOST: "{{ .Release.Name }}-database"
depends_on:
- database
database:
image: mariadb:10
environment:
MARIADB_ROOT_PASSWORD: foobar
MARIADB_USER: foo
MARIADB_PASSWORD: foo
MARIADB_DATABASE: myapp
labels:
# because we don't provide "ports" or "expose", alert katenary
# to use the mysql port for service declaration
katenary.io/ports: 3306

View File

@@ -0,0 +1,49 @@
# cronjobs
A Helm chart for cronjobs
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
# Standard Helm install
$ helm install my-release cronjobs
# To use a custom namespace and force the creation of the namespace
$ helm install my-release --namespace my-namespace --create-namespace cronjobs
# To use a custom values file
$ helm install my-release -f my-values.yaml cronjobs
```
See the [Helm documentation](https://helm.sh/docs/intro/using_helm/) for more information on installing and managing the chart.
## Configuration
The following table lists the configurable parameters of the cronjobs chart and their default values.
| Parameter | Default |
| ----------------------------------- | -------------- |
| `app.imagePullPolicy` | `IfNotPresent` |
| `app.replicas` | `1` |
| `app.repository.image` | `nginx` |
| `app.repository.tag` | `` |
| `backup.cronjob.imagePullPolicy` | `IfNotPresent` |
| `backup.cronjob.repository.image` | `alpine` |
| `backup.cronjob.repository.tag` | `1` |
| `backup.cronjob.schedule` | `@hourly` |
| `backup.imagePullPolicy` | `IfNotPresent` |
| `backup.replicas` | `1` |
| `backup.repository.image` | `alpine` |
| `backup.repository.tag` | `1` |
| `withrbac.cronjob.imagePullPolicy` | `IfNotPresent` |
| `withrbac.cronjob.repository.image` | `busybox` |
| `withrbac.cronjob.repository.tag` | `` |
| `withrbac.cronjob.schedule` | `@daily` |
| `withrbac.imagePullPolicy` | `IfNotPresent` |
| `withrbac.replicas` | `1` |
| `withrbac.repository.image` | `busybox` |
| `withrbac.repository.tag` | `` |

View File

@@ -0,0 +1,27 @@
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm -n {{ .Release.Namespace }} status {{ .Release.Name }}
$ helm -n {{ .Release.Namespace }} get all {{ .Release.Name }}
To delete the release, run:
$ helm -n {{ .Release.Namespace }} delete {{ .Release.Name }}
You can see this notes again by running:
$ helm -n {{ .Release.Namespace }} get notes {{ .Release.Name }}
{{- $count := 0 -}}
{{- range $s, $v := .Values -}}
{{- if and $v $v.ingress -}}
{{- $count = add $count 1 -}}
{{- if eq $count 1 }}
The ingress list is:
{{ end }}
- {{ $s }}: http://{{ $v.ingress.host }}{{ $v.ingress.path }}
{{- end -}}
{{ end -}}

View File

@@ -0,0 +1,36 @@
{{- define "cronjobs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "cronjobs.name" -}}
{{- if .Values.nameOverride -}}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "cronjobs.labels" -}}
{{ include "cronjobs.selectorLabels" .}}
{{ if .Chart.Version -}}
{{ printf "katenary.v3/chart-version: %s" .Chart.Version }}
{{- end }}
{{ if .Chart.AppVersion -}}
{{ printf "katenary.v3/app-version: %s" .Chart.AppVersion }}
{{- end }}
{{- end -}}
{{- define "cronjobs.selectorLabels" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{ printf "katenary.v3/name: %s" $name }}
{{ printf "katenary.v3/instance: %s" .Release.Name }}
{{- end -}}

View File

@@ -1,9 +0,0 @@
# Example with Ghost
[Ghost](https://ghost.org/) is a simple but powerful blog engine. It is very nice to test some behaviors with Docker or Podman.
The given `docker-compose.yaml` file declares a stand-alone blog service. To help using it, we use the [Pathwae](https://pathwae.net) reverse proxy to listen on http://ghost.example.localhost
The problem to solve is that the `url` environment variable corresponds to the Ingress host when we convert it to a Helm Chart. So, we use the `mapenv` label to declare that `url` is actually the `{{ .Values.blog.ingress.host }}` value.
Note that we also `ignore` pathwae because we don't need it in our Helm Chart.

View File

@@ -1,8 +0,0 @@
# Create on 2022-05-05T14:16:27+02:00
# Katenary command line: /tmp/go-build669507924/b001/exe/main convert
apiVersion: v2
appVersion: 0.0.1
description: A helm chart for ghost
name: ghost
type: application
version: 0.1.0

View File

@@ -1,8 +0,0 @@
Congratulations,
Your application is now deployed. This may take a while to be up and responding.
{{ if .Values.blog.ingress.enabled -}}
- blog is accessible on : http://{{ .Values.blog.ingress.host }}
{{- end }}

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-blog'
labels:
katenary.io/component: blog
katenary.io/project: ghost
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 0c2bbf548ff569c3dc5d77dc158e98bbe86fb5d4
katenary.io/version: master
spec:
replicas: 1
selector:
matchLabels:
katenary.io/component: blog
katenary.io/release: '{{ .Release.Name }}'
template:
metadata:
labels:
katenary.io/component: blog
katenary.io/release: '{{ .Release.Name }}'
spec:
containers:
- name: blog
image: '{{ .Values.blog.image }}'
ports:
- name: blog
containerPort: 2368
env:
- name: url
value: http://{{ .Values.blog.ingress.host }}

View File

@@ -1,42 +0,0 @@
{{- if .Values.blog.ingress.enabled -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: '{{ .Release.Name }}-blog'
labels:
katenary.io/component: blog
katenary.io/project: ghost
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 0c2bbf548ff569c3dc5d77dc158e98bbe86fb5d4
katenary.io/version: master
spec:
{{- if and .Values.blog.ingress.class (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: '{{ .Values.blog.ingress.class }}'
{{- end }}
rules:
- host: '{{ .Values.blog.ingress.host }}'
http:
paths:
- path: /
{{- if semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion }}
pathType: Prefix
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: '{{ .Release.Name }}-blog'
port:
number: 2368
{{- else }}
serviceName: '{{ .Release.Name }}-blog'
servicePort: 2368
{{- end }}
{{- end -}}

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-blog'
labels:
katenary.io/component: blog
katenary.io/project: ghost
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 0c2bbf548ff569c3dc5d77dc158e98bbe86fb5d4
katenary.io/version: master
spec:
selector:
katenary.io/component: blog
katenary.io/release: '{{ .Release.Name }}'
ports:
- protocol: TCP
port: 2368
targetPort: 2368

View File

@@ -1,6 +0,0 @@
blog:
image: ghost
ingress:
class: nginx
enabled: false
host: blog.ghost.tld

View File

@@ -1,30 +0,0 @@
version: "3"
services:
blog:
image: ghost
environment:
# this is OK for local test, but not with Helm
# because the URL depends on Ingress
url: http://ghost.example.localhost
labels:
katenary.io/ports: 2368
katenary.io/ingress: 2368
# ... so we declare that "url" is actually
# the ingress host
katenary.io/mapenv: |
url: http://{{ .Values.blog.ingress.host }}
proxy:
# A simple proxy for localhost
image: quay.io/pathwae/proxy
environment:
CONFIG: |
ghost.example.localhost:
to: http://blog:2368
ports:
- 80:80
labels:
# we don't want this in Helm because we will use
# an ingress
katenary.io/ignore: true

View File

@@ -0,0 +1,37 @@
# multidir
A Helm chart for multidir
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
# Standard Helm install
$ helm install my-release multidir
# To use a custom namespace and force the creation of the namespace
$ helm install my-release --namespace my-namespace --create-namespace multidir
# To use a custom values file
$ helm install my-release -f my-values.yaml multidir
```
See the [Helm documentation](https://helm.sh/docs/intro/using_helm/) for more information on installing and managing the chart.
## Configuration
The following table lists the configurable parameters of the multidir chart and their default values.
| Parameter | Default |
| ---------------------- | -------------- |
| `bar.imagePullPolicy` | `IfNotPresent` |
| `bar.replicas` | `1` |
| `bar.repository.image` | `alpine` |
| `bar.repository.tag` | `` |
| `foo.imagePullPolicy` | `IfNotPresent` |
| `foo.replicas` | `1` |
| `foo.repository.image` | `alpine` |
| `foo.repository.tag` | `` |

View File

@@ -0,0 +1,27 @@
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm -n {{ .Release.Namespace }} status {{ .Release.Name }}
$ helm -n {{ .Release.Namespace }} get all {{ .Release.Name }}
To delete the release, run:
$ helm -n {{ .Release.Namespace }} delete {{ .Release.Name }}
You can see this notes again by running:
$ helm -n {{ .Release.Namespace }} get notes {{ .Release.Name }}
{{- $count := 0 -}}
{{- range $s, $v := .Values -}}
{{- if and $v $v.ingress -}}
{{- $count = add $count 1 -}}
{{- if eq $count 1 }}
The ingress list is:
{{ end }}
- {{ $s }}: http://{{ $v.ingress.host }}{{ $v.ingress.path }}
{{- end -}}
{{ end -}}

View File

@@ -0,0 +1,36 @@
{{- define "multidir.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "multidir.name" -}}
{{- if .Values.nameOverride -}}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "multidir.labels" -}}
{{ include "multidir.selectorLabels" .}}
{{ if .Chart.Version -}}
{{ printf "katenary.v3/chart-version: %s" .Chart.Version }}
{{- end }}
{{ if .Chart.AppVersion -}}
{{ printf "katenary.v3/app-version: %s" .Chart.AppVersion }}
{{- end }}
{{- end -}}
{{- define "multidir.selectorLabels" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{ printf "katenary.v3/name: %s" $name }}
{{ printf "katenary.v3/instance: %s" .Release.Name }}
{{- end -}}

View File

@@ -0,0 +1 @@
A file containing configuration here

View File

@@ -0,0 +1,2 @@
variable: foo
example: bar

View File

@@ -1,13 +0,0 @@
# Make it possible to bind several containers in one pod
In this example, we need to make nginx and php-fpm run inside the same "pod". The reason is that we configured FPM to listen on a unix socket instead of port 9000.
Because NGinx needs to connect to the unix socket, which is a file, both containers should share the same node and work together.
So, in the docker-compose file, we need to declare:
- `katenary.io/empty-dirs: socket` where `socket` is the "volume name", this will avoid the creation of a PVC
- `katenary.io/same-pod: http` in `php` container to declare that this will be added in the `containers` section of the `http` deployment
You can note that we also use `configmap-volumes` to declare our configuration as `configMap`.
Take a look at the [chart/same-pod](chart/same-pod) directory to see the result of the `katenary convert` command.

View File

@@ -1,8 +0,0 @@
# Create on 2022-02-17T11:36:02+01:00
# Katenary command line: katenary convert --force
apiVersion: v2
appVersion: 0.0.1
description: A helm chart for same-pod
name: same-pod
type: application
version: 0.1.0

View File

@@ -1,8 +0,0 @@
Congratulations,
Your application is now deployed. This may take a while to be up and responding.
{{ if .Values.http.ingress.enabled -}}
- http is accessible on : http://{{ .Values.http.ingress.host }}
{{- end }}

View File

@@ -1,23 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: '{{ .Release.Name }}-config-nginx-http'
labels:
katenary.io/component: ""
katenary.io/project: same-pod
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 74e67695bfdbb829f15531321e158808018280e0
katenary.io/version: master-bf44d44
data:
default.conf: |
upstream _php {
server unix:/sock/fpm.sock;
}
server {
listen 80;
location ~ ^/index\.php(/|$) {
fastcgi_pass _php;
include fastcgi_params;
}
}

View File

@@ -1,30 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: '{{ .Release.Name }}-config-php-php'
labels:
katenary.io/component: ""
katenary.io/project: same-pod
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 74e67695bfdbb829f15531321e158808018280e0
katenary.io/version: master-bf44d44
data:
www.conf: |
[www]
user = www-data
group = www-data
listen = /sock/fpm.sock
pm = dynamic
pm.max_children = 5
pm.start_servers = 2
pm.min_spare_servers = 1
pm.max_spare_servers = 3
access.log = /proc/self/fd/2
log_limit = 8192
clear_env = no
catch_workers_output = yes
decorate_workers_output = no

View File

@@ -1,52 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-http'
labels:
katenary.io/component: http
katenary.io/project: same-pod
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 74e67695bfdbb829f15531321e158808018280e0
katenary.io/version: master-bf44d44
spec:
replicas: 1
selector:
matchLabels:
katenary.io/component: http
katenary.io/release: '{{ .Release.Name }}'
template:
metadata:
labels:
katenary.io/component: http
katenary.io/release: '{{ .Release.Name }}'
spec:
containers:
- name: http
image: '{{ .Values.http.image }}'
ports:
- name: http
containerPort: 80
volumeMounts:
- mountPath: /sock
name: sock
- mountPath: /etc/nginx/conf.d
name: config-nginx
- name: php
image: '{{ .Values.php.image }}'
volumeMounts:
- mountPath: /sock
name: sock
- mountPath: /usr/local/etc/php-fpm.d/www.conf
name: config-php
subPath: www.conf
volumes:
- emptyDir: {}
name: sock
- configMap:
name: '{{ .Release.Name }}-config-nginx-http'
name: config-nginx
- configMap:
name: '{{ .Release.Name }}-config-php-php'
name: config-php

View File

@@ -1,34 +0,0 @@
{{- if .Values.http.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: '{{ .Release.Name }}-http'
labels:
katenary.io/component: http
katenary.io/project: same-pod
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 74e67695bfdbb829f15531321e158808018280e0
katenary.io/version: master-bf44d44
spec:
{{- if and .Values.http.ingress.class (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: '{{ .Values.http.ingress.class }}'
{{- end }}
rules:
- host: '{{ .Values.http.ingress.host }}'
http:
paths:
- path: /
pathType: Prefix
backend:
{{- if semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: '{{ .Release.Name }}-http'
port:
number: 80
{{- else }}
serviceName: '{{ .Release.Name }}-http'
servicePort: 80
{{- end }}
{{- end -}}

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-http'
labels:
katenary.io/component: http
katenary.io/project: same-pod
katenary.io/release: '{{ .Release.Name }}'
annotations:
katenary.io/docker-compose-sha1: 74e67695bfdbb829f15531321e158808018280e0
katenary.io/version: master-bf44d44
spec:
selector:
katenary.io/component: http
katenary.io/release: '{{ .Release.Name }}'
ports:
- protocol: TCP
port: 80
targetPort: 80

View File

@@ -1,8 +0,0 @@
http:
image: nginx:alpine
ingress:
class: nginx
enabled: false
host: http.same-pod.tld
php:
image: php:fpm

View File

@@ -1,10 +0,0 @@
upstream _php {
server unix:/sock/fpm.sock;
}
server {
listen 80;
location ~ ^/index\.php(/|$) {
fastcgi_pass _php;
include fastcgi_params;
}
}

View File

@@ -1,17 +0,0 @@
[www]
user = www-data
group = www-data
listen = /sock/fpm.sock
pm = dynamic
pm.max_children = 5
pm.start_servers = 2
pm.min_spare_servers = 1
pm.max_spare_servers = 3
access.log = /proc/self/fd/2
log_limit = 8192
clear_env = no
catch_workers_output = yes
decorate_workers_output = no

View File

@@ -1,38 +0,0 @@
version: "3"
services:
http:
image: nginx:alpine
ports:
- "8080:80"
volumes:
- "sock:/sock"
- "./config/nginx:/etc/nginx/conf.d:z"
labels:
# the "sock" volume will need to be shared to the same pod, so let's
# declare that this is not a PVC
katenary.io/empty-dirs: sock
# use ./config/nginx as a configMap
katenary.io/configmap-volumes: ./config/nginx
# declare an ingress
katenary.io/ingress: 80
php:
image: php:fpm
volumes:
- "sock:/sock"
- "./config/php/www.conf:/usr/local/etc/php-fpm.d/www.conf:z"
labels:
# fpm will need to use a unix socket shared
# with nginx (http service above), so we want here
# make a single pod containing nginx and php
katenary.io/same-pod: http
# use the ./config/php files as a configMap
katenary.io/configmap-volumes: ./config/php/www.conf
volumes:
sock:

View File

@@ -0,0 +1,37 @@
# shareenv
A Helm chart for shareenv
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
# Standard Helm install
$ helm install my-release shareenv
# To use a custom namespace and force the creation of the namespace
$ helm install my-release --namespace my-namespace --create-namespace shareenv
# To use a custom values file
$ helm install my-release -f my-values.yaml shareenv
```
See the [Helm documentation](https://helm.sh/docs/intro/using_helm/) for more information on installing and managing the chart.
## Configuration
The following table lists the configurable parameters of the shareenv chart and their default values.
| Parameter | Default |
| ----------------------- | -------------- |
| `app1.imagePullPolicy` | `IfNotPresent` |
| `app1.replicas` | `1` |
| `app1.repository.image` | `nginx` |
| `app1.repository.tag` | `1` |
| `app2.imagePullPolicy` | `IfNotPresent` |
| `app2.replicas` | `1` |
| `app2.repository.image` | `nginx` |
| `app2.repository.tag` | `1` |

View File

@@ -0,0 +1,27 @@
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm -n {{ .Release.Namespace }} status {{ .Release.Name }}
$ helm -n {{ .Release.Namespace }} get all {{ .Release.Name }}
To delete the release, run:
$ helm -n {{ .Release.Namespace }} delete {{ .Release.Name }}
You can see this notes again by running:
$ helm -n {{ .Release.Namespace }} get notes {{ .Release.Name }}
{{- $count := 0 -}}
{{- range $s, $v := .Values -}}
{{- if and $v $v.ingress -}}
{{- $count = add $count 1 -}}
{{- if eq $count 1 }}
The ingress list is:
{{ end }}
- {{ $s }}: http://{{ $v.ingress.host }}{{ $v.ingress.path }}
{{- end -}}
{{ end -}}

View File

@@ -0,0 +1,36 @@
{{- define "shareenv.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "shareenv.name" -}}
{{- if .Values.nameOverride -}}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "shareenv.labels" -}}
{{ include "shareenv.selectorLabels" .}}
{{ if .Chart.Version -}}
{{ printf "katenary.v3/chart-version: %s" .Chart.Version }}
{{- end }}
{{ if .Chart.AppVersion -}}
{{ printf "katenary.v3/app-version: %s" .Chart.AppVersion }}
{{- end }}
{{- end -}}
{{- define "shareenv.selectorLabels" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{ printf "katenary.v3/name: %s" $name }}
{{ printf "katenary.v3/instance: %s" .Release.Name }}
{{- end -}}

View File

@@ -0,0 +1,37 @@
# somevolumes
A Helm chart for somevolumes
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
# Standard Helm install
$ helm install my-release somevolumes
# To use a custom namespace and force the creation of the namespace
$ helm install my-release --namespace my-namespace --create-namespace somevolumes
# To use a custom values file
$ helm install my-release -f my-values.yaml somevolumes
```
See the [Helm documentation](https://helm.sh/docs/intro/using_helm/) for more information on installing and managing the chart.
## Configuration
The following table lists the configurable parameters of the somevolumes chart and their default values.
| Parameter | Default |
| ----------------------------------------------- | ----------------- |
| `site1.imagePullPolicy` | `IfNotPresent` |
| `site1.persistence.statics.accessMode[0].value` | `ReadWriteOnce` |
| `site1.persistence.statics.enabled` | `true` |
| `site1.persistence.statics.size` | `1Gi` |
| `site1.persistence.statics.storageClass` | `-` |
| `site1.replicas` | `1` |
| `site1.repository.image` | `docker.io/nginx` |
| `site1.repository.tag` | `1` |

View File

@@ -0,0 +1,27 @@
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm -n {{ .Release.Namespace }} status {{ .Release.Name }}
$ helm -n {{ .Release.Namespace }} get all {{ .Release.Name }}
To delete the release, run:
$ helm -n {{ .Release.Namespace }} delete {{ .Release.Name }}
You can see this notes again by running:
$ helm -n {{ .Release.Namespace }} get notes {{ .Release.Name }}
{{- $count := 0 -}}
{{- range $s, $v := .Values -}}
{{- if and $v $v.ingress -}}
{{- $count = add $count 1 -}}
{{- if eq $count 1 }}
The ingress list is:
{{ end }}
- {{ $s }}: http://{{ $v.ingress.host }}{{ $v.ingress.path }}
{{- end -}}
{{ end -}}

View File

@@ -0,0 +1,36 @@
{{- define "somevolumes.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "somevolumes.name" -}}
{{- if .Values.nameOverride -}}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "somevolumes.labels" -}}
{{ include "somevolumes.selectorLabels" .}}
{{ if .Chart.Version -}}
{{ printf "katenary.v3/chart-version: %s" .Chart.Version }}
{{- end }}
{{ if .Chart.AppVersion -}}
{{ printf "katenary.v3/app-version: %s" .Chart.AppVersion }}
{{- end }}
{{- end -}}
{{- define "somevolumes.selectorLabels" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{ printf "katenary.v3/name: %s" $name }}
{{ printf "katenary.v3/instance: %s" .Release.Name }}
{{- end -}}

60
generator/chart.go Normal file
View File

@@ -0,0 +1,60 @@
package generator
// Dependency is a dependency of a chart to other charts.
type Dependency struct {
Name string `yaml:"name"`
Version string `yaml:"version"`
Repository string `yaml:"repository"`
Alias string `yaml:"alias,omitempty"`
Values map[string]any `yaml:"-"` // do not export to Chart.yaml
}
// ChartTemplate is a template of a chart. It contains the content of the template and the name of the service.
// This is used internally to generate the templates.
//
// TODO: maybe we can set it private.
type ChartTemplate struct {
Content []byte
Servicename string
}
// HelmChart is a Helm Chart representation. It contains all the
// templates, values, versions, helpers...
type HelmChart struct {
Name string `yaml:"name"`
ApiVersion string `yaml:"apiVersion"`
Version string `yaml:"version"`
AppVersion string `yaml:"appVersion"`
Description string `yaml:"description"`
Dependencies []Dependency `yaml:"dependencies,omitempty"`
Templates map[string]*ChartTemplate `yaml:"-"` // do not export to yaml
Helper string `yaml:"-"` // do not export to yaml
Values map[string]any `yaml:"-"` // do not export to yaml
VolumeMounts map[string]any `yaml:"-"` // do not export to yaml
composeHash *string `yaml:"-"` // do not export to yaml
}
// NewChart creates a new empty chart with the given name.
func NewChart(name string) *HelmChart {
return &HelmChart{
Name: name,
Templates: make(map[string]*ChartTemplate, 0),
Description: "A Helm chart for " + name,
ApiVersion: "v2",
Version: "",
AppVersion: "", // set to 0.1.0 by default if no "main-app" label is found
Values: map[string]any{
"pullSecrets": []string{},
},
}
}
// ConvertOptions are the options to convert a compose project to a helm chart.
type ConvertOptions struct {
Force bool // Force the chart directory deletion if it already exists.
OutputDir string // The output directory of the chart.
Profiles []string // Profile to use for the conversion.
HelmUpdate bool // If true, the "helm dep update" command will be run after the chart generation.
AppVersion *string // Set the chart "appVersion" field. If nil, the version will be set to 0.1.0.
ChartVersion string // Set the chart "version" field.
}

224
generator/configMap.go Normal file
View File

@@ -0,0 +1,224 @@
package generator
import (
"katenary/utils"
"log"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/compose-spec/compose-go/types"
goyaml "gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// only used to check interface implementation
var (
_ DataMap = (*ConfigMap)(nil)
_ Yaml = (*ConfigMap)(nil)
)
// NewFileMap creates a new DataMap from a compose service. The appName is the name of the application taken from the project name.
func NewFileMap(service types.ServiceConfig, appName string, kind string) DataMap {
switch kind {
case "configmap":
return NewConfigMap(service, appName)
default:
log.Fatalf("Unknown filemap kind: %s", kind)
}
return nil
}
// FileMapUsage is the usage of the filemap.
type FileMapUsage uint8
// FileMapUsage constants.
const (
FileMapUsageConfigMap FileMapUsage = iota // pure configmap for key:values.
FileMapUsageFiles // files in a configmap.
)
// ConfigMap is a kubernetes ConfigMap.
// Implements the DataMap interface.
type ConfigMap struct {
*corev1.ConfigMap
service *types.ServiceConfig
usage FileMapUsage
path string
}
// NewConfigMap creates a new ConfigMap from a compose service. The appName is the name of the application taken from the project name.
// The ConfigMap is filled by environment variables and labels "map-env".
func NewConfigMap(service types.ServiceConfig, appName string) *ConfigMap {
done := map[string]bool{}
drop := map[string]bool{}
secrets := []string{}
labelValues := []string{}
cm := &ConfigMap{
service: &service,
ConfigMap: &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Data: make(map[string]string),
},
}
// get the secrets from the labels
if v, ok := service.Labels[LABEL_SECRETS]; ok {
err := yaml.Unmarshal([]byte(v), &secrets)
if err != nil {
log.Fatal(err)
}
// drop the secrets from the environment
for _, secret := range secrets {
drop[secret] = true
}
}
// get the label values from the labels
varDescriptions := utils.GetValuesFromLabel(service, LABEL_VALUES)
for value := range varDescriptions {
labelValues = append(labelValues, value)
}
// change the environment variables to the values defined in the values.yaml
for _, value := range labelValues {
if _, ok := service.Environment[value]; !ok {
done[value] = true
continue
}
//val := `{{ tpl .Values.` + service.Name + `.environment.` + value + ` $ }}`
val := utils.TplValue(service.Name, "environment."+value)
service.Environment[value] = &val
}
// remove the variables that are already defined in the environment
if l, ok := service.Labels[LABEL_MAP_ENV]; ok {
envmap := make(map[string]string)
if err := goyaml.Unmarshal([]byte(l), &envmap); err != nil {
log.Fatal("Error parsing map-env", err)
}
for key, value := range envmap {
cm.AddData(key, strings.ReplaceAll(value, "__APP__", appName))
done[key] = true
}
}
for key, env := range service.Environment {
if _, ok := done[key]; ok {
continue
}
if _, ok := drop[key]; ok {
continue
}
cm.AddData(key, *env)
}
return cm
}
// NewConfigMapFromFiles creates a new ConfigMap from a compose service. This path is the path to the
// file or directory. If the path is a directory, all files in the directory are added to the ConfigMap.
// Each subdirectory is ignored. Note that the Generate() function will create the ConfigMaps for the subdirectories.
func NewConfigMapFromFiles(service types.ServiceConfig, appName string, path string) *ConfigMap {
normalized := path
normalized = strings.TrimLeft(normalized, ".")
normalized = strings.TrimLeft(normalized, "/")
normalized = regexp.MustCompile(`[^a-zA-Z0-9-]+`).ReplaceAllString(normalized, "-")
cm := &ConfigMap{
path: path,
service: &service,
usage: FileMapUsageFiles,
ConfigMap: &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName) + "-" + normalized,
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Data: make(map[string]string),
},
}
// cumulate the path to the WorkingDir
path = filepath.Join(service.WorkingDir, path)
path = filepath.Clean(path)
cm.AppendDir(path)
return cm
}
// SetData sets the data of the configmap. It replaces the entire data.
func (c *ConfigMap) SetData(data map[string]string) {
c.Data = data
}
// AddData adds a key value pair to the configmap. Append or overwrite the value if the key already exists.
func (c *ConfigMap) AddData(key string, value string) {
c.Data[key] = value
}
// AppendDir adds files from the given path to the configmap. It is not recursive; to add all files in a
// directory, you need to call this function for each subdirectory.
func (c *ConfigMap) AppendDir(path string) {
// read all files in the path and add them to the configmap
stat, err := os.Stat(path)
if err != nil {
log.Fatalf("Path %s does not exist\n", path)
}
// the path is a directory: add each regular file it contains (subdirectories are skipped)
if stat.IsDir() {
files, err := os.ReadDir(path)
if err != nil {
log.Fatal(err)
}
for _, file := range files {
if file.IsDir() {
continue
}
path := filepath.Join(path, file.Name())
content, err := os.ReadFile(path)
if err != nil {
log.Fatal(err)
}
// remove the path from the file
filename := filepath.Base(path)
c.AddData(filename, string(content))
}
} else {
// add the file to the configmap
content, err := os.ReadFile(path)
if err != nil {
log.Fatal(err)
}
c.AddData(filepath.Base(path), string(content))
}
}
// Filename returns the filename of the configmap. If the configmap is used for files, the filename contains the path.
func (c *ConfigMap) Filename() string {
switch c.usage {
case FileMapUsageFiles:
return filepath.Join(c.service.Name, "statics", c.path, "configmap.yaml")
default:
return c.service.Name + ".configmap.yaml"
}
}
// Yaml returns the yaml representation of the configmap
func (c *ConfigMap) Yaml() ([]byte, error) {
return yaml.Marshal(c)
}

View File

@@ -1,200 +0,0 @@
package generator
import (
"fmt"
"katenary/helm"
"katenary/logger"
"log"
"os"
"strconv"
"strings"
"github.com/compose-spec/compose-go/types"
)
// Generate a container in deployment with all needed objects (volumes, secrets, env, ...).
// The deployName should be the name of the deployment; we cannot get it from Metadata as this is a variable name.
func newContainerForDeployment(
deployName, containerName string,
deployment *helm.Deployment,
s *types.ServiceConfig,
fileGeneratorChan HelmFileGenerator) *helm.Container {
buildCrontab(deployName, deployment, s, fileGeneratorChan)
container := helm.NewContainer(containerName, s.Image, s.Environment, s.Labels)
applyEnvMapLabel(s, container)
if secretFile := setSecretVar(containerName, s, container); secretFile != nil {
fileGeneratorChan <- secretFile
container.EnvFrom = append(container.EnvFrom, map[string]map[string]string{
"secretRef": {
"name": secretFile.Metadata().Name,
},
})
}
setEnvToValues(containerName, s, container)
prepareContainer(container, s, containerName)
prepareEnvFromFiles(deployName, s, container, fileGeneratorChan)
// add the container in deployment
if deployment.Spec.Template.Spec.Containers == nil {
deployment.Spec.Template.Spec.Containers = make([]*helm.Container, 0)
}
deployment.Spec.Template.Spec.Containers = append(
deployment.Spec.Template.Spec.Containers,
container,
)
// add the volumes
if deployment.Spec.Template.Spec.Volumes == nil {
deployment.Spec.Template.Spec.Volumes = make([]map[string]interface{}, 0)
}
// manage LABEL_VOLUMEFROM
addVolumeFrom(deployment, container, s)
// and then we can add other volumes
deployment.Spec.Template.Spec.Volumes = append(
deployment.Spec.Template.Spec.Volumes,
prepareVolumes(deployName, containerName, s, container, fileGeneratorChan)...,
)
// add init containers
if deployment.Spec.Template.Spec.InitContainers == nil {
deployment.Spec.Template.Spec.InitContainers = make([]*helm.Container, 0)
}
deployment.Spec.Template.Spec.InitContainers = append(
deployment.Spec.Template.Spec.InitContainers,
prepareInitContainers(containerName, s, container)...,
)
// check if there is containerPort assigned in label, add it, and do
// not create service for this.
if ports, ok := s.Labels[helm.LABEL_CONTAINER_PORT]; ok {
for _, port := range strings.Split(ports, ",") {
func(port string, container *helm.Container, s *types.ServiceConfig) {
port = strings.TrimSpace(port)
if port == "" {
return
}
portNumber, err := strconv.Atoi(port)
if err != nil {
return
}
// avoid already declared ports
for _, p := range s.Ports {
if int(p.Target) == portNumber {
return
}
}
container.Ports = append(container.Ports, &helm.ContainerPort{
Name: deployName + "-" + port,
ContainerPort: portNumber,
})
}(port, container, s)
}
}
return container
}
// prepareContainer assigns image, command, env, and labels to a container.
func prepareContainer(container *helm.Container, service *types.ServiceConfig, servicename string) {
// if there is no image name, this should fail!
if service.Image == "" {
log.Fatal(ICON_PACKAGE+" No image name for service ", servicename)
}
// Get the image tag
imageParts := strings.Split(service.Image, ":")
tag := ""
if len(imageParts) == 2 {
container.Image = imageParts[0]
tag = imageParts[1]
}
vtag := ".Values." + servicename + ".repository.tag"
container.Image = `{{ .Values.` + servicename + `.repository.image }}` +
`{{ if ne ` + vtag + ` "" }}:{{ ` + vtag + ` }}{{ end }}`
container.Command = service.Command
AddValues(servicename, map[string]EnvVal{
"repository": map[string]EnvVal{
"image": imageParts[0],
"tag": tag,
},
})
prepareProbes(servicename, service, container)
generateContainerPorts(service, servicename, container)
}
// generateContainerPorts add the container ports of a service.
func generateContainerPorts(s *types.ServiceConfig, name string, container *helm.Container) {
exists := make(map[int]string)
for _, port := range s.Ports {
portName := name
for _, n := range exists {
if name == n {
portName = fmt.Sprintf("%s-%d", name, port.Target)
}
}
container.Ports = append(container.Ports, &helm.ContainerPort{
Name: portName,
ContainerPort: int(port.Target),
})
exists[int(port.Target)] = name
}
// manage the "expose" section to be a NodePort in Kubernetes
for _, expose := range s.Expose {
port, _ := strconv.Atoi(expose)
if _, exist := exists[port]; exist {
continue
}
container.Ports = append(container.Ports, &helm.ContainerPort{
Name: name,
ContainerPort: port,
})
}
}
// prepareInitContainers adds the init containers of a service.
func prepareInitContainers(name string, s *types.ServiceConfig, container *helm.Container) []*helm.Container {
// We need to detect other services, but we probably have not parsed them yet, so
// we will wait for them for a while.
initContainers := make([]*helm.Container, 0)
for dp := range s.DependsOn {
c := helm.NewContainer("check-"+dp, "busybox", nil, s.Labels)
command := strings.ReplaceAll(strings.TrimSpace(dependScript), "__service__", dp)
foundPort := -1
locker.Lock()
if defaultPort, ok := servicesMap[dp]; !ok {
logger.Redf("Error while getting port for service %s\n", dp)
os.Exit(1)
} else {
foundPort = defaultPort
}
locker.Unlock()
if foundPort == -1 {
log.Fatalf(
"ERROR, the %s service is waiting for %s port number, "+
"but it is never discovered. You must declare at least one port in "+
"the \"ports\" section of the service in the docker-compose file",
name,
dp,
)
}
command = strings.ReplaceAll(command, "__port__", strconv.Itoa(foundPort))
c.Command = []string{
"sh",
"-c",
command,
}
initContainers = append(initContainers, c)
}
return initContainers
}

638
generator/converter.go Normal file
View File

@@ -0,0 +1,638 @@
package generator
import (
"bytes"
"errors"
"fmt"
"katenary/generator/extrafiles"
"katenary/parser"
"katenary/utils"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/compose-spec/compose-go/types"
goyaml "gopkg.in/yaml.v3"
)
const headerHelp = `# This file is autogenerated by katenary
#
# DO NOT EDIT IT BY HAND UNLESS YOU KNOW WHAT YOU ARE DOING
#
# If you want to change the content of this file, you should edit the
# compose file and run katenary again.
# If you need to override some values, you can do it in an override file
# and use the -f flag to specify it when running the helm command.
`
// Convert a compose (docker, podman...) project to a helm chart.
// It calls Generate() to generate the chart and then writes it to disk.
func Convert(config ConvertOptions, dockerComposeFile ...string) {
var (
templateDir = filepath.Join(config.OutputDir, "templates")
helpersPath = filepath.Join(config.OutputDir, "templates", "_helpers.tpl")
chartPath = filepath.Join(config.OutputDir, "Chart.yaml")
valuesPath = filepath.Join(config.OutputDir, "values.yaml")
readmePath = filepath.Join(config.OutputDir, "README.md")
notesPath = filepath.Join(templateDir, "NOTES.txt")
)
// save the current working directory so we can return to it after the generation
currentDir, _ := os.Getwd()
// go to the root of the project
if err := os.Chdir(filepath.Dir(dockerComposeFile[0])); err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
defer os.Chdir(currentDir) // after the generation, go back to the original directory
// remove the directory part of the docker-compose file paths
for i, f := range dockerComposeFile {
dockerComposeFile[i] = filepath.Base(f)
}
// parse the compose files
project, err := parser.Parse(config.Profiles, dockerComposeFile...)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// check older version of labels
if err := checkOldLabels(project); err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
if !config.Force {
// check if the chart directory exists
// if yes, prevent the user from overwriting it and ask for confirmation
if _, err := os.Stat(config.OutputDir); err == nil {
fmt.Print(utils.IconWarning, " The chart directory "+config.OutputDir+" already exists, do you want to overwrite it? [y/N] ")
var answer string
fmt.Scanln(&answer)
if strings.ToLower(answer) != "y" {
fmt.Println("Aborting")
os.Exit(126) // 126 is the exit code for "Command invoked cannot execute"
}
}
fmt.Println() // clean line
}
// Build the objects !
chart, err := Generate(project)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// if the app version is set from the command line, use it
if config.AppVersion != nil {
chart.AppVersion = *config.AppVersion
}
chart.Version = config.ChartVersion
// remove the chart directory if it exists
os.RemoveAll(config.OutputDir)
// create the chart directory
if err := os.MkdirAll(templateDir, 0755); err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
for name, template := range chart.Templates {
t := template.Content
t = removeNewlinesInsideBrackets(t)
t = removeUnwantedLines(t)
t = addModeline(t)
kind := utils.GetKind(name)
var icon utils.Icon
switch kind {
case "deployment":
icon = utils.IconPackage
case "service":
icon = utils.IconPlug
case "ingress":
icon = utils.IconWorld
case "volumeclaim":
icon = utils.IconCabinet
case "configmap":
icon = utils.IconConfig
case "secret":
icon = utils.IconSecret
default:
icon = utils.IconInfo
}
servicename := template.Servicename
if err := os.MkdirAll(filepath.Join(templateDir, servicename), 0755); err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
fmt.Println(icon, "Creating", kind, servicename)
// if the name is a path, create the directory
if strings.Contains(name, string(filepath.Separator)) {
name = filepath.Join(templateDir, name)
err := os.MkdirAll(filepath.Dir(name), 0755)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
} else {
// remove the service name from the template name
name = strings.Replace(name, servicename+".", "", 1)
name = filepath.Join(templateDir, servicename, name)
}
f, err := os.Create(name)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
f.Write(t)
f.Close()
}
// marshal the chart to YAML (the compose hash comment is prepended below)
buf := bytes.NewBuffer(nil)
encoder := goyaml.NewEncoder(buf)
encoder.SetIndent(2)
if err := encoder.Encode(chart); err != nil {
fmt.Println(err)
os.Exit(1)
}
yamlChart := buf.Bytes()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// prepend a comment with the hash of the compose files on top of the chart
yamlChart = append([]byte(fmt.Sprintf("# compose hash (sha1): %s\n", *chart.composeHash)), yamlChart...)
// add the list of compose files
files := []string{}
for _, file := range project.ComposeFiles {
base := filepath.Base(file)
files = append(files, base)
}
yamlChart = append([]byte(fmt.Sprintf("# compose files: %s\n", strings.Join(files, ", "))), yamlChart...)
// add generated date
yamlChart = append([]byte(fmt.Sprintf("# generated at: %s\n", time.Now().Format(time.RFC3339))), yamlChart...)
// document Chart.yaml file
yamlChart = addChartDoc(yamlChart, project)
f, err := os.Create(chartPath)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
f.Write(yamlChart)
f.Close()
buf.Reset()
encoder = goyaml.NewEncoder(buf)
encoder.SetIndent(2)
if err = encoder.Encode(&chart.Values); err != nil {
fmt.Println(err)
os.Exit(1)
}
values := buf.Bytes()
values = addDescriptions(values, *project)
values = addDependencyDescription(values, chart.Dependencies)
values = addCommentsToValues(values)
values = addStorageClassHelp(values)
values = addImagePullSecretsHelp(values)
values = addImagePullPolicyHelp(values)
values = addVariablesDoc(values, project)
values = addMainTagAppDoc(values, project)
values = append([]byte(headerHelp), values...)
f, err = os.Create(valuesPath)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
f.Write(values)
f.Close()
f, err = os.Create(helpersPath)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
f.Write([]byte(chart.Helper))
f.Close()
readme := extrafiles.ReadMeFile(chart.Name, chart.Description, chart.Values)
f, err = os.Create(readmePath)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
f.Write([]byte(readme))
f.Close()
notes := extrafiles.NotesFile()
f, err = os.Create(notesPath)
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
f.Write([]byte(notes))
f.Close()
if config.HelmUpdate {
if err := helmUpdate(config); err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
} else if err := helmLint(config); err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
} else {
fmt.Println(utils.IconSuccess, "Helm chart created successfully")
}
}
}
const ingressClassHelp = `# Default value for ingress.class annotation
# class: "-"
# If the value is "-", controller will not set ingressClassName
# If the value is "", Ingress will be set to an empty string, so
# controller will use the default value for ingressClass
# If the value is specified, controller will set the named class e.g. "nginx"
# More info: https://kubernetes.io/docs/concepts/services-networking/ingress/#the-ingress-resource
`
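// addCommentsToValues adds the ingressClassHelp comment above each "ingress:" entry of the values.yaml content.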
func addCommentsToValues(values []byte) []byte {
lines := strings.Split(string(values), "\n")
for i, line := range lines {
if strings.Contains(line, "ingress:") {
spaces := utils.CountStartingSpaces(line)
spacesString := strings.Repeat(" ", spaces)
// indent the ingressClassHelp comment
ingressClassHelp := strings.ReplaceAll(ingressClassHelp, "\n", "\n"+spacesString)
ingressClassHelp = strings.TrimRight(ingressClassHelp, " ")
ingressClassHelp = spacesString + ingressClassHelp
lines[i] = ingressClassHelp + line
}
}
return []byte(strings.Join(lines, "\n"))
}
const storageClassHelp = `# Storage class to use for PVCs
# storageClass: "-" means use default
# storageClass: "" means do not specify
# storageClass: "foo" means use that storageClass
# More info: https://kubernetes.io/docs/concepts/storage/storage-classes/
`
// addStorageClassHelp adds a comment to the values.yaml file to explain how to
// use the storageClass option.
func addStorageClassHelp(values []byte) []byte {
lines := strings.Split(string(values), "\n")
for i, line := range lines {
if strings.Contains(line, "storageClass:") {
spaces := utils.CountStartingSpaces(line)
spacesString := strings.Repeat(" ", spaces)
// indent the storageClassHelp comment
storageClassHelp := strings.ReplaceAll(storageClassHelp, "\n", "\n"+spacesString)
storageClassHelp = strings.TrimRight(storageClassHelp, " ")
storageClassHelp = spacesString + storageClassHelp
lines[i] = storageClassHelp + line
}
}
return []byte(strings.Join(lines, "\n"))
}
// addModeline adds a modeline to the values.yaml file to make sure that vim
// will use the correct syntax highlighting.
func addModeline(values []byte) []byte {
modeline := "# vi" + "m: ft=gotmpl.yaml"
// if the values ends by `{{- end }}` we need to add the modeline before
lines := strings.Split(string(values), "\n")
if lines[len(lines)-1] == "{{- end }}" || lines[len(lines)-1] == "{{- end -}}" {
lines = lines[:len(lines)-1]
lines = append(lines, modeline, "{{- end }}")
return []byte(strings.Join(lines, "\n"))
}
return append(values, []byte(modeline)...)
}
// addDescriptions adds the description from the label to the values.yaml file on top
// of the service definition.
func addDescriptions(values []byte, project types.Project) []byte {
for _, service := range project.Services {
if description, ok := service.Labels[LABEL_DESCRIPTION]; ok {
// set it as comment
description = "\n# " + strings.ReplaceAll(description, "\n", "\n# ")
values = regexp.MustCompile(
`(?m)^`+service.Name+`:$`,
).ReplaceAll(values, []byte(description+"\n"+service.Name+":"))
} else {
// set it as comment
description = "\n# " + service.Name + " configuration"
values = regexp.MustCompile(
`(?m)^`+service.Name+`:$`,
).ReplaceAll(
values,
[]byte(description+"\n"+service.Name+":"),
)
}
}
return values
}
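// addDependencyDescription adds a comment above each Helm dependency entry of the values.yaml content.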
func addDependencyDescription(values []byte, dependencies []Dependency) []byte {
for _, d := range dependencies {
name := d.Name
if d.Alias != "" {
name = d.Alias
}
values = regexp.MustCompile(
`(?m)^`+name+`:$`,
).ReplaceAll(
values,
[]byte("\n# "+d.Name+" helm dependency configuration\n"+name+":"),
)
}
return values
}
const imagePullSecretHelp = `
# imagePullSecrets allows you to specify the name of an image pull secret.
# You must provide a list of objects with the name field set to the name of the secret,
# e.g.
# pullSecrets:
# - name: regcred
# You are, for now, responsible for creating the secret.
# More info: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
`
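// addImagePullSecretsHelp adds the imagePullSecretHelp comment above each "pullSecrets:" entry of the values.yaml content.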
func addImagePullSecretsHelp(values []byte) []byte {
// add imagePullSecrets help
lines := strings.Split(string(values), "\n")
for i, line := range lines {
if strings.Contains(line, "pullSecrets:") {
spaces := utils.CountStartingSpaces(line)
spacesString := strings.Repeat(" ", spaces)
// indent imagePullSecretHelp comment
imagePullSecretHelp := strings.ReplaceAll(imagePullSecretHelp, "\n", "\n"+spacesString)
imagePullSecretHelp = strings.TrimRight(imagePullSecretHelp, " ")
imagePullSecretHelp = spacesString + imagePullSecretHelp
lines[i] = imagePullSecretHelp + line
}
}
return []byte(strings.Join(lines, "\n"))
}
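// addChartDoc documents the generated Chart.yaml file with comments for the name, version, appVersion and dependencies fields.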
func addChartDoc(values []byte, project *types.Project) []byte {
chartDoc := fmt.Sprintf(`# This is the main values.yaml file for the %s chart.
# More information can be found in the chart's README.md file.
#
`, project.Name)
lines := strings.Split(string(values), "\n")
for i, line := range lines {
if regexp.MustCompile(`(?m)^name:`).MatchString(line) {
doc := fmt.Sprintf("\n# Name of the chart (required), basically the name of the project.\n")
lines[i] = doc + line
} else if regexp.MustCompile(`(?m)^version:`).MatchString(line) {
doc := fmt.Sprintf("\n# Version of the chart (required)\n")
lines[i] = doc + line
} else if strings.Contains(line, "appVersion:") {
spaces := utils.CountStartingSpaces(line)
doc := fmt.Sprintf(
"\n%s# Version of the application (required).\n%s# This should be the main application version.\n",
strings.Repeat(" ", spaces),
strings.Repeat(" ", spaces),
)
lines[i] = doc + line
} else if strings.Contains(line, "dependencies:") {
spaces := utils.CountStartingSpaces(line)
doc := fmt.Sprintf("\n"+
"%s# Dependencies are external charts that this chart will depend on.\n"+
"%s# More information can be found in the chart's README.md file.\n",
strings.Repeat(" ", spaces),
strings.Repeat(" ", spaces),
)
lines[i] = doc + line
}
}
return []byte(chartDoc + strings.Join(lines, "\n"))
}
const imagePullPolicyHelp = `# imagePullPolicy allows you to specify a policy to cache or always pull an image.
# You must provide a string value with one of the following values:
# - Always -> will always pull the image
# - Never -> will never pull the image, the image should be present on the node
# - IfNotPresent -> will pull the image only if it is not present on the node
# More info: https://kubernetes.io/docs/concepts/containers/images/#updating-images
`
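// addImagePullPolicyHelp adds the imagePullPolicyHelp comment above each "imagePullPolicy:" entry of the values.yaml content.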
func addImagePullPolicyHelp(values []byte) []byte {
// add imagePullPolicy help
lines := strings.Split(string(values), "\n")
for i, line := range lines {
if strings.Contains(line, "imagePullPolicy:") {
spaces := utils.CountStartingSpaces(line)
spacesString := strings.Repeat(" ", spaces)
// indent imagePullPolicyHelp comment
imagePullPolicyHelp := strings.ReplaceAll(imagePullPolicyHelp, "\n", "\n"+spacesString)
imagePullPolicyHelp = strings.TrimRight(imagePullPolicyHelp, " ")
imagePullPolicyHelp = spacesString + imagePullPolicyHelp
lines[i] = imagePullPolicyHelp + line
}
}
return []byte(strings.Join(lines, "\n"))
}
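// addVariablesDoc adds the description of each variable declared with the values label as a comment in the values.yaml content.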
func addVariablesDoc(values []byte, project *types.Project) []byte {
lines := strings.Split(string(values), "\n")
currentService := ""
for _, service := range project.Services {
variables := utils.GetValuesFromLabel(service, LABEL_VALUES)
for i, line := range lines {
if regexp.MustCompile(`(?m)^` + service.Name + `:`).MatchString(line) {
currentService = service.Name
}
for varname, variable := range variables {
if variable == nil {
continue
}
spaces := utils.CountStartingSpaces(line)
if regexp.MustCompile(`(?m)\s*`+varname+`:`).MatchString(line) && currentService == service.Name {
// add # to the beginning of the Description
doc := strings.ReplaceAll("\n"+variable.Description, "\n", "\n"+strings.Repeat(" ", spaces)+"# ")
doc = strings.TrimRight(doc, " ")
doc += "\n" + line
lines[i] = doc
}
}
}
}
return []byte(strings.Join(lines, "\n"))
}
const mainTagAppDoc = `This is the version of the main application.
Leave it blank to use the Chart "AppVersion" value.`
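// addMainTagAppDoc adds the mainTagAppDoc comment above the repository tag of the main application in the values.yaml content.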
func addMainTagAppDoc(values []byte, project *types.Project) []byte {
lines := strings.Split(string(values), "\n")
for _, service := range project.Services {
inService := false
inRegistry := false
// read the label LabelMainApp
if v, ok := service.Labels[LABEL_MAIN_APP]; !ok {
continue
} else if v == "false" || v == "no" || v == "0" {
continue
} else {
fmt.Printf("%s Adding main tag app doc %s\n", utils.IconConfig, service.Name)
}
for i, line := range lines {
if regexp.MustCompile(`^` + service.Name + `:`).MatchString(line) {
inService = true
}
if inService && regexp.MustCompile(`^\s*repository:.*`).MatchString(line) {
inRegistry = true
}
if inService && inRegistry {
if regexp.MustCompile(`^\s*tag: .*`).MatchString(line) {
spaces := utils.CountStartingSpaces(line)
doc := strings.ReplaceAll(mainTagAppDoc, "\n", "\n"+strings.Repeat(" ", spaces)+"# ")
doc = strings.Repeat(" ", spaces) + "# " + doc
lines[i] = doc + "\n" + line + "\n"
break
}
}
}
}
return []byte(strings.Join(lines, "\n"))
}
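// removeNewlinesInsideBrackets flattens Go template expressions ({{ ... }}) that span several lines into a single line.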
func removeNewlinesInsideBrackets(values []byte) []byte {
re, err := regexp.Compile(`(?s)\{\{(.*?)\}\}`)
if err != nil {
log.Fatal(err)
}
return re.ReplaceAllFunc(values, func(b []byte) []byte {
// get the first match
matches := re.FindSubmatch(b)
replacement := bytes.ReplaceAll(matches[1], []byte("\n"), []byte(" "))
// remove repeated spaces
replacement = regexp.MustCompile(`\s+`).ReplaceAll(replacement, []byte(" "))
// remove newlines inside brackets
return bytes.ReplaceAll(b, matches[1], replacement)
})
}
var unwantedLines = []string{
"creationTimestamp:",
"status:",
}
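// removeUnwantedLines drops the lines that should not appear in the generated templates (creationTimestamp, status).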
func removeUnwantedLines(values []byte) []byte {
lines := strings.Split(string(values), "\n")
output := []string{}
for _, line := range lines {
next := false
for _, unwanted := range unwantedLines {
if strings.Contains(line, unwanted) {
next = true
}
}
if !next {
output = append(output, line)
}
}
return []byte(strings.Join(output, "\n"))
}
// check if the project makes use of older labels (katenary.* labels from versions prior to v3)
func checkOldLabels(project *types.Project) error {
badServices := make([]string, 0)
for _, service := range project.Services {
for label := range service.Labels {
if strings.Contains(label, "katenary.") && !strings.Contains(label, KATENARY_PREFIX) {
badServices = append(badServices, fmt.Sprintf("- %s: %s", service.Name, label))
}
}
}
if len(badServices) > 0 {
message := fmt.Sprintf(` Old labels detected in project "%s".
The current version of katenary uses labels with the prefix "%s" which are not compatible with previous versions.
Your project is not compatible with this version.
Please upgrade your labels to follow the current version
Services to upgrade:
%s`,
project.Name,
KATENARY_PREFIX[0:len(KATENARY_PREFIX)-1],
strings.Join(badServices, "\n"),
)
return errors.New(utils.WordWrap(message, 80))
}
return nil
}
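// helmUpdate runs "helm dependency update" on the generated chart.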
func helmUpdate(config ConvertOptions) error {
// lookup for "helm" binary
fmt.Println(utils.IconInfo, "Updating helm dependencies...")
helm, err := exec.LookPath("helm")
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
// run "helm dependency update"
cmd := exec.Command(helm, "dependency", "update", config.OutputDir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
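// helmLint runs "helm lint" on the generated chart.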
func helmLint(config ConvertOptions) error {
fmt.Println(utils.IconInfo, "Linting...")
helm, err := exec.LookPath("helm")
if err != nil {
fmt.Println(utils.IconFailure, err)
os.Exit(1)
}
cmd := exec.Command(helm, "lint", config.OutputDir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
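For reference, a minimal way to drive this converter from your own code could look like the sketch below. It is only an illustration: it assumes the module path `katenary`, that `ConvertOptions` can be built as a plain struct literal with the fields referenced above (OutputDir, ChartVersion, Force, HelmUpdate), and that the compose file path is relative to the current directory.

```go
package main

import "katenary/generator"

func main() {
	// Options mirror the fields read by Convert() above; the values here are examples.
	opts := generator.ConvertOptions{
		OutputDir:    "./chart", // where the Helm chart is written
		ChartVersion: "0.1.0",   // Chart.yaml "version"
		Force:        false,     // ask before overwriting an existing chart directory
		HelmUpdate:   true,      // run "helm dependency update" and "helm lint" afterwards
	}
	// Convert parses the compose file, generates the chart and writes it to disk.
	// It exits the process itself on error, so there is no error value to handle.
	generator.Convert(opts, "docker-compose.yaml")
}
```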

133
generator/cronJob.go Normal file
View File

@@ -0,0 +1,133 @@
package generator
import (
"katenary/utils"
"log"
"strings"
"github.com/compose-spec/compose-go/types"
goyaml "gopkg.in/yaml.v3"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// only used to check interface implementation
var (
_ Yaml = (*CronJob)(nil)
)
// CronJob is a kubernetes CronJob.
type CronJob struct {
*batchv1.CronJob
service *types.ServiceConfig
}
// NewCronJob creates a new CronJob from a compose service. The appName is the name of the application taken from the project name.
func NewCronJob(service types.ServiceConfig, chart *HelmChart, appName string) (*CronJob, *RBAC) {
var labels, ok = service.Labels[LABEL_CRONJOB]
if !ok {
return nil, nil
}
mapping := struct {
Image string `yaml:"image,omitempty"`
Command string `yaml:"command"`
Schedule string `yaml:"schedule"`
Rbac bool `yaml:"rbac"`
}{
Image: "",
Command: "",
Schedule: "",
Rbac: false,
}
if err := goyaml.Unmarshal([]byte(labels), &mapping); err != nil {
log.Fatalf("Error parsing cronjob labels: %s", err)
return nil, nil
}
if _, ok := chart.Values[service.Name]; !ok {
chart.Values[service.Name] = NewValue(service, false)
}
if chart.Values[service.Name].(*Value).CronJob == nil {
chart.Values[service.Name].(*Value).CronJob = &CronJobValue{}
}
chart.Values[service.Name].(*Value).CronJob.Schedule = mapping.Schedule
chart.Values[service.Name].(*Value).CronJob.ImagePullPolicy = "IfNotPresent"
chart.Values[service.Name].(*Value).CronJob.Environment = map[string]any{}
image, tag := mapping.Image, ""
if image == "" { // if image is not set, use the image from the service
image = service.Image
}
if strings.Contains(image, ":") {
parts := strings.SplitN(image, ":", 2)
image, tag = parts[0], parts[1]
}
chart.Values[service.Name].(*Value).CronJob.Repository = &RepositoryValue{
Image: image,
Tag: tag,
}
cronjob := &CronJob{
CronJob: &batchv1.CronJob{
TypeMeta: metav1.TypeMeta{
Kind: "CronJob",
APIVersion: "batch/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Spec: batchv1.CronJobSpec{
Schedule: "{{ .Values." + service.Name + ".cronjob.schedule }}",
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "cronjob",
Image: "{{ .Values." + service.Name + ".cronjob.repository.image }}:{{ default .Values." + service.Name + ".cronjob.repository.tag \"latest\" }}",
Command: []string{
"sh",
"-c",
mapping.Command,
},
},
},
},
},
},
},
},
},
service: &service,
}
var rbac *RBAC
if mapping.Rbac {
rbac = NewRBAC(service, appName)
// add the service account to the cronjob
cronjob.Spec.JobTemplate.Spec.Template.Spec.ServiceAccountName = utils.TplName(service.Name, appName)
}
return cronjob, rbac
}
// Filename returns the filename of the cronjob.
//
// Implements the Yaml interface.
func (c *CronJob) Filename() string {
return c.service.Name + ".cronjob.yaml"
}
// Yaml returns the yaml representation of the cronjob.
//
// Implements the Yaml interface.
func (c *CronJob) Yaml() ([]byte, error) {
return yaml.Marshal(c)
}
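To make the expected label shape concrete, here is a small standalone sketch of the parsing performed above. The label content is hypothetical; only the keys (image, command, schedule, rbac) come from the mapping struct used by NewCronJob.

```go
package main

import (
	"fmt"
	"log"

	goyaml "gopkg.in/yaml.v3"
)

func main() {
	// Hypothetical content of the katenary cronjob label on a compose service.
	label := `
image: busybox:latest
command: "echo hello"
schedule: "*/10 * * * *"
rbac: false
`
	// Same shape as the anonymous struct used by NewCronJob above.
	mapping := struct {
		Image    string `yaml:"image,omitempty"`
		Command  string `yaml:"command"`
		Schedule string `yaml:"schedule"`
		Rbac     bool   `yaml:"rbac"`
	}{}
	if err := goyaml.Unmarshal([]byte(label), &mapping); err != nil {
		log.Fatalf("Error parsing cronjob label: %s", err)
	}
	// Prints: {Image:busybox:latest Command:echo hello Schedule:*/10 * * * * Rbac:false}
	fmt.Printf("%+v\n", mapping)
}
```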

View File

@@ -1,110 +0,0 @@
package generator
import (
"fmt"
"katenary/helm"
"katenary/logger"
"log"
"github.com/alessio/shellescape"
"github.com/compose-spec/compose-go/types"
"gopkg.in/yaml.v3"
)
const (
cronMulti = `pods=$(kubectl get pods --selector=%s/component=%s,%s/resource=deployment -o jsonpath='{.items[*].metadata.name}')`
cronMultiCmd = `
for pod in $pods; do
kubectl exec -i $pod -c %s -- sh -c %s
done`
cronSingle = `pod=$(kubectl get pods --selector=%s/component=%s,%s/resource=deployment -o jsonpath='{.items[0].metadata.name}')`
cronCmd = `
kubectl exec -i $pod -c %s -- sh -c %s`
)
type CronDef struct {
Command string `yaml:"command"`
Schedule string `yaml:"schedule"`
Image string `yaml:"image"`
Multi bool `yaml:"allPods,omitempty"`
}
func buildCrontab(deployName string, deployment *helm.Deployment, s *types.ServiceConfig, fileGeneratorChan HelmFileGenerator) {
// get the cron label from the service
var crondef string
var ok bool
if crondef, ok = s.Labels[helm.LABEL_CRON]; !ok {
return
}
// parse yaml
crons := []CronDef{}
err := yaml.Unmarshal([]byte(crondef), &crons)
if err != nil {
log.Fatalf("error: %v", err)
}
if len(crons) == 0 {
return
}
// create a serviceAccount
sa := helm.NewServiceAccount(deployName)
// create a role
role := helm.NewCronRole(deployName)
// create a roleBinding
roleBinding := helm.NewRoleBinding(deployName, sa, role)
// make generation
logger.Magenta(ICON_RBAC, "Generating ServiceAccount, Role and RoleBinding for cron jobs", deployName)
fileGeneratorChan <- sa
fileGeneratorChan <- role
fileGeneratorChan <- roleBinding
numcron := len(crons) - 1
index := 1
// create crontabs
for _, cron := range crons {
escaped := shellescape.Quote(cron.Command)
var cmd, podget string
if cron.Multi {
podget = cronMulti
cmd = cronMultiCmd
} else {
podget = cronSingle
cmd = cronCmd
}
podget = fmt.Sprintf(podget, helm.K, deployName, helm.K)
cmd = fmt.Sprintf(cmd, s.Name, escaped)
cmd = podget + cmd
if cron.Image == "" {
cron.Image = `bitnami/kubectl:{{ printf "%s.%s" .Capabilities.KubeVersion.Major .Capabilities.KubeVersion.Minor }}`
}
name := deployName
if numcron > 0 {
name = fmt.Sprintf("%s-%d", deployName, index)
}
// add crontab
suffix := ""
if numcron > 0 {
suffix = fmt.Sprintf("%d", index)
}
cronTab := helm.NewCrontab(
name,
cron.Image,
cmd,
cron.Schedule,
sa,
)
logger.Magenta(ICON_CRON, "Generating crontab", deployName, suffix)
fileGeneratorChan <- cronTab
index++
}
return
}

View File

@@ -1,70 +1,569 @@
package generator
import (
"katenary/helm"
"katenary/logger"
"fmt"
"katenary/utils"
"log"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/compose-spec/compose-go/types"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
// This function will try to yield deployments and services based on a service from the compose file structure.
func buildDeployment(name string, s *types.ServiceConfig, linked map[string]types.ServiceConfig, fileGeneratorChan HelmFileGenerator) {
var _ Yaml = (*Deployment)(nil)
logger.Magenta(ICON_PACKAGE+" Generating deployment for ", name)
deployment := helm.NewDeployment(name)
newContainerForDeployment(name, name, deployment, s, fileGeneratorChan)
// Add selectors
selectors := buildSelector(name, s)
selectors[helm.K+"/resource"] = "deployment"
deployment.Spec.Selector = map[string]interface{}{
"matchLabels": selectors,
// Deployment is a kubernetes Deployment.
type Deployment struct {
*appsv1.Deployment `yaml:",inline"`
chart *HelmChart `yaml:"-"`
configMaps map[string]bool `yaml:"-"`
service *types.ServiceConfig `yaml:"-"`
defaultTag string `yaml:"-"`
isMainApp bool `yaml:"-"`
}
deployment.Spec.Template.Metadata.Labels = selectors
// Now, the linked services (same pod)
for lname, link := range linked {
newContainerForDeployment(name, lname, deployment, &link, fileGeneratorChan)
// append ports and expose ports to the deployment,
// to be able to generate them in the Service file
if len(link.Ports) > 0 || len(link.Expose) > 0 {
s.Ports = append(s.Ports, link.Ports...)
s.Expose = append(s.Expose, link.Expose...)
// NewDeployment creates a new Deployment from a compose service. The appName is the name of the application taken from the project name.
// It also creates the Values map that will be used to create the values.yaml file.
func NewDeployment(service types.ServiceConfig, chart *HelmChart) *Deployment {
ports := []corev1.ContainerPort{}
for _, port := range service.Ports {
ports = append(ports, corev1.ContainerPort{
ContainerPort: int32(port.Target),
})
}
isMainApp := false
if mainLabel, ok := service.Labels[LABEL_MAIN_APP]; ok {
main := strings.ToLower(mainLabel)
isMainApp = main == "true" || main == "yes" || main == "1"
}
defaultTag := `default "latest"`
if isMainApp {
defaultTag = `default .Chart.AppVersion "latest"`
}
chart.Values[service.Name] = NewValue(service, isMainApp)
appName := chart.Name
dep := &Deployment{
isMainApp: isMainApp,
defaultTag: defaultTag,
service: &service,
chart: chart,
Deployment: &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Spec: appsv1.DeploymentSpec{
Replicas: utils.Int32Ptr(1),
Selector: &metav1.LabelSelector{
MatchLabels: GetMatchLabels(service.Name, appName),
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: GetMatchLabels(service.Name, appName),
},
},
},
},
configMaps: map[string]bool{},
}
// add containers
dep.AddContainer(service)
// add volumes
dep.AddVolumes(service, appName)
if service.Environment != nil {
dep.SetEnvFrom(service, appName)
}
return dep
}
// DependsOn adds an initContainer to the deployment that will wait for the service to be up.
func (d *Deployment) DependsOn(to *Deployment) error {
// Add an initContainer with busybox:latest using netcat to check if the service is up
// it will wait until the service responds to all ports
for _, container := range to.Spec.Template.Spec.Containers {
commands := []string{}
for _, port := range container.Ports {
command := fmt.Sprintf("until nc -z %s %d; do\n sleep 1;\ndone", to.Name, port.ContainerPort)
commands = append(commands, command)
}
command := []string{"/bin/sh", "-c", strings.Join(commands, "\n")}
d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, corev1.Container{
Name: "wait-for-" + to.service.Name,
Image: "busybox:latest",
Command: command,
})
}
return nil
}
// AddContainer adds a container to the deployment.
func (d *Deployment) AddContainer(service types.ServiceConfig) {
ports := []corev1.ContainerPort{}
for _, port := range service.Ports {
name := utils.GetServiceNameByPort(int(port.Target))
if name == "" {
utils.Warn("Port name not found for port ", port.Target, " in service ", service.Name, ". Using port number instead")
}
ports = append(ports, corev1.ContainerPort{
ContainerPort: int32(port.Target),
Name: name,
})
}
container := corev1.Container{
Image: utils.TplValue(service.Name, "repository.image") + ":" +
utils.TplValue(service.Name, "repository.tag", d.defaultTag),
Ports: ports,
Name: service.Name,
ImagePullPolicy: corev1.PullIfNotPresent,
}
if _, ok := d.chart.Values[service.Name]; !ok {
d.chart.Values[service.Name] = NewValue(service, d.isMainApp)
}
d.chart.Values[service.Name].(*Value).ImagePullPolicy = string(corev1.PullIfNotPresent)
// add an imagePullSecret. It actually does not work as-is because the secret is not
// created, but it adds the reference in the YAML file. We fix it up in the Yaml()
// method.
d.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{
Name: `{{ .Values.pullSecrets | toYaml | indent __indent__ }}`,
}}
d.AddHealthCheck(service, &container)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, container)
}
// AddIngress adds an ingress to the deployment. It creates the ingress object.
func (d *Deployment) AddIngress(service types.ServiceConfig, appName string) *Ingress {
return NewIngress(service, d.chart)
}
// AddVolumes adds a volume to the deployment. It does not create the PVC, it only adds the volumes to the deployment.
// If the volume is a bind volume it will warn the user that it is not supported yet.
func (d *Deployment) AddVolumes(service types.ServiceConfig, appName string) {
tobind := map[string]bool{}
if v, ok := service.Labels[LABEL_CM_FILES]; ok {
binds := []string{}
if err := yaml.Unmarshal([]byte(v), &binds); err != nil {
log.Fatal(err)
}
for _, bind := range binds {
tobind[bind] = true
}
}
// Remove duplicates in volumes
volumes := make([]map[string]interface{}, 0)
done := make(map[string]bool)
for _, vol := range deployment.Spec.Template.Spec.Volumes {
name := vol["name"].(string)
if _, ok := done[name]; ok {
continue
isSamePod := false
if v, ok := service.Labels[LABEL_SAME_POD]; !ok {
isSamePod = false
} else {
done[name] = true
volumes = append(volumes, vol)
isSamePod = v != ""
}
}
deployment.Spec.Template.Spec.Volumes = volumes
// Then, create Services and possible Ingresses for ingress labels, "ports" and "expose" section
if len(s.Ports) > 0 || len(s.Expose) > 0 {
for _, s := range generateServicesAndIngresses(name, s) {
if s != nil {
fileGeneratorChan <- s
for _, volume := range service.Volumes {
// not declared as a bind volume, skip
if _, ok := tobind[volume.Source]; !isSamePod && volume.Type == "bind" && !ok {
utils.Warn(
"Bind volumes are not supported yet, " +
"excepting for those declared as " +
LABEL_CM_FILES +
", skipping volume " + volume.Source +
" from service " + service.Name,
)
continue
}
container, index := utils.GetContainerByName(service.Name, d.Spec.Template.Spec.Containers)
if container == nil {
utils.Warn("Container not found for volume", volume.Source)
continue
}
// ensure that the volume is not already mounted in the container
alreadyMounted := false
for _, vm := range container.VolumeMounts {
if vm.Name == volume.Source {
alreadyMounted = true
break
}
}
if alreadyMounted {
continue
}
switch volume.Type {
case "volume":
// Add volume to container
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
Name: volume.Source,
MountPath: volume.Target,
})
// Add the volume to values.yaml only if the service is not in the same pod as another service.
// If it is in the same pod, the volume will be added to the other service later.
if _, ok := service.Labels[LABEL_SAME_POD]; !ok {
d.chart.Values[service.Name].(*Value).AddPersistence(volume.Source)
}
// Add volume to deployment
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{
Name: volume.Source,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: utils.TplName(service.Name, appName, volume.Source),
},
},
})
case "bind":
// Add volume to container
cm := NewConfigMapFromFiles(service, appName, volume.Source)
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{
Name: utils.PathToName(volume.Source),
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cm.ObjectMeta.Name,
},
},
},
})
// add the mount path to the container
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
Name: utils.PathToName(volume.Source),
MountPath: volume.Target,
})
d.configMaps[utils.PathToName(volume.Source)] = true
// add all subdirectories to the list of directories
stat, err := os.Stat(volume.Source)
if err != nil {
log.Fatal(err)
}
if stat.IsDir() {
files, err := os.ReadDir(volume.Source)
if err != nil {
log.Fatal(err)
}
for _, file := range files {
if file.IsDir() {
cm := NewConfigMapFromFiles(service, appName, filepath.Join(volume.Source, file.Name()))
name := utils.PathToName(volume.Source) + "-" + file.Name()
d.configMaps[name] = true
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{
Name: utils.PathToName(volume.Source) + "-" + file.Name(),
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cm.ObjectMeta.Name,
},
},
},
})
// add the mount path to the container
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
Name: name,
MountPath: filepath.Join(volume.Target, file.Name()),
})
}
}
}
}
// add the volumes in Values
if len(VolumeValues[name]) > 0 {
AddValues(name, map[string]EnvVal{"persistence": VolumeValues[name]})
d.Spec.Template.Spec.Containers[index] = *container
}
}
// the deployment is ready, give it
fileGeneratorChan <- deployment
// and then, we can say that it's the end
fileGeneratorChan <- nil
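// BindFrom copies the volumes and the corresponding volume mounts of a bound ("same-pod") deployment into this deployment.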
func (d *Deployment) BindFrom(service types.ServiceConfig, binded *Deployment) {
log.Printf("In %s deployment, add volumes for service %s from binded deployment %s", d.Name, service.Name, binded.Name)
// find the volume in the binded deployment
for _, bindedVolume := range binded.Spec.Template.Spec.Volumes {
log.Println("bindedVolume.Name found", bindedVolume.Name)
skip := false
for _, targetVol := range d.Spec.Template.Spec.Volumes {
if targetVol.Name == bindedVolume.Name {
log.Println("Volume", bindedVolume.Name, "already exists in deployment", d.Name)
skip = true
break
}
}
if !skip {
// add the volume to the current deployment
d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, bindedVolume)
log.Println("d.Spec.Template.Spec.Volumes", d.Spec.Template.Spec.Volumes)
// get the container
}
// add volume mount to the container
targetContainer, ti := utils.GetContainerByName(service.Name, d.Spec.Template.Spec.Containers)
sourceContainer, _ := utils.GetContainerByName(service.Name, binded.Spec.Template.Spec.Containers)
for _, bindedMount := range sourceContainer.VolumeMounts {
if bindedMount.Name == bindedVolume.Name {
log.Println("bindedMount.Name found", bindedMount.Name)
targetContainer.VolumeMounts = append(targetContainer.VolumeMounts, bindedMount)
}
}
d.Spec.Template.Spec.Containers[ti] = *targetContainer
}
}
// SetEnvFrom sets the environment variables to a configmap. The configmap is created.
func (d *Deployment) SetEnvFrom(service types.ServiceConfig, appName string) {
if len(service.Environment) == 0 {
return
}
drop := []string{}
secrets := []string{}
// secrets from label
labelSecrets := []string{}
if v, ok := service.Labels[LABEL_SECRETS]; ok {
err := yaml.Unmarshal([]byte(v), &labelSecrets)
if err != nil {
log.Fatal(err)
}
}
// values from label
varDescriptions := utils.GetValuesFromLabel(service, LABEL_VALUES)
labelValues := []string{}
for v := range varDescriptions {
labelValues = append(labelValues, v)
}
for _, secret := range labelSecrets {
// get the secret name
_, ok := service.Environment[secret]
if !ok {
drop = append(drop, secret)
utils.Warn("Secret " + secret + " not found in service " + service.Name + " - skpped")
continue
}
secrets = append(secrets, secret)
}
// for each values from label "values", add it to Values map and change the envFrom
// value to {{ .Values.<service>.<value> }}
for _, value := range labelValues {
// get the environment variable name
val, ok := service.Environment[value]
if !ok {
drop = append(drop, value)
utils.Warn("Environment variable " + value + " not found in service " + service.Name + " - skpped")
continue
}
if d.chart.Values[service.Name].(*Value).Environment == nil {
d.chart.Values[service.Name].(*Value).Environment = make(map[string]any)
}
d.chart.Values[service.Name].(*Value).Environment[value] = *val
// set the environment variable to bind to the values.yaml file
v := utils.TplValue(service.Name, "environment."+value)
service.Environment[value] = &v
}
for _, value := range drop {
delete(service.Environment, value)
}
fromSources := []corev1.EnvFromSource{}
if len(service.Environment) > 0 {
fromSources = append(fromSources, corev1.EnvFromSource{
ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: utils.TplName(service.Name, appName),
},
},
})
}
if len(secrets) > 0 {
fromSources = append(fromSources, corev1.EnvFromSource{
SecretRef: &corev1.SecretEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: utils.TplName(service.Name, appName),
},
},
})
}
container, index := utils.GetContainerByName(service.Name, d.Spec.Template.Spec.Containers)
if container == nil {
utils.Warn("Container not found for service " + service.Name)
return
}
container.EnvFrom = append(container.EnvFrom, fromSources...)
if container.Env == nil {
container.Env = []corev1.EnvVar{}
}
d.Spec.Template.Spec.Containers[index] = *container
}
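// AddHealthCheck adds liveness and readiness probes to the container, either from the katenary healthcheck label or from the compose file healthcheck definition.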
func (d *Deployment) AddHealthCheck(service types.ServiceConfig, container *corev1.Container) {
// get the label for healthcheck
if v, ok := service.Labels[LABEL_HEALTHCHECK]; ok {
probes := struct {
LivenessProbe *corev1.Probe `yaml:"livenessProbe"`
ReadinessProbe *corev1.Probe `yaml:"readinessProbe"`
}{}
err := yaml.Unmarshal([]byte(v), &probes)
if err != nil {
log.Fatal(err)
}
container.LivenessProbe = probes.LivenessProbe
container.ReadinessProbe = probes.ReadinessProbe
return
}
if service.HealthCheck != nil {
period := 30.0
if service.HealthCheck.Interval != nil {
period = time.Duration(*service.HealthCheck.Interval).Seconds()
}
container.LivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: service.HealthCheck.Test[1:],
},
},
PeriodSeconds: int32(period),
}
}
}
// Yaml returns the yaml representation of the deployment.
func (d *Deployment) Yaml() ([]byte, error) {
serviceName := d.service.Name
y, err := yaml.Marshal(d)
if err != nil {
return nil, err
}
// for each volume mount, add a condition "if values has persistence"
changing := false
content := strings.Split(string(y), "\n")
spaces := ""
volumeName := ""
// this loop adds a condition for each volume mount
for line, volume := range content {
// find the volume name
for i := line; i < len(content); i++ {
if strings.Contains(content[i], "name: ") {
volumeName = strings.TrimSpace(strings.Replace(content[i], "name: ", "", 1))
break
}
}
if volumeName == "" {
continue
}
if _, ok := d.configMaps[volumeName]; ok {
continue
}
if strings.Contains(volume, "- mountPath: ") {
spaces = strings.Repeat(" ", utils.CountStartingSpaces(volume))
content[line] = spaces + `{{- if .Values.` + serviceName + `.persistence.` + volumeName + `.enabled }}` + "\n" + volume
changing = true
}
if strings.Contains(volume, "name: ") && changing {
content[line] = volume + "\n" + spaces + "{{- end }}"
changing = false
}
}
changing = false
inVolumes := false
volumeName = ""
// this loop changes imagePullPolicy to {{ .Values.<service>.imagePullPolicy }}
// and wraps each volume definition in a condition checking that persistence is enabled in the values
for i, line := range content {
if strings.Contains(line, "imagePullPolicy:") {
spaces = strings.Repeat(" ", utils.CountStartingSpaces(line))
content[i] = spaces + "imagePullPolicy: {{ .Values." + serviceName + ".imagePullPolicy }}"
}
// find the volume name
for i := i; i < len(content); i++ {
if strings.Contains(content[i], "- name: ") {
volumeName = strings.TrimSpace(strings.Replace(content[i], "- name: ", "", 1))
break
}
}
if strings.Contains(line, "volumes:") {
inVolumes = true
}
if volumeName == "" {
continue
}
if _, ok := d.configMaps[volumeName]; ok {
continue
}
if strings.Contains(line, "- name: ") && inVolumes {
spaces = strings.Repeat(" ", utils.CountStartingSpaces(line))
content[i] = spaces + `{{- if .Values.` + serviceName + `.persistence.` + volumeName + `.enabled }}` + "\n" + line
changing = true
}
if strings.Contains(line, "claimName: ") && changing {
content[i] = line + "\n" + spaces + "{{- end }}"
changing = false
}
}
// for imagePullSecrets, replace the name with the value from values.yaml
inpullsecrets := false
for i, line := range content {
if strings.Contains(line, "imagePullSecrets:") {
inpullsecrets = true
}
if inpullsecrets && strings.Contains(line, "- name: ") {
line = strings.Replace(line, "- name: ", "", 1)
line = strings.ReplaceAll(line, "'", "")
content[i] = line
inpullsecrets = false
}
}
// Find the replicas line and replace it with the value from values.yaml
for i, line := range content {
if strings.Contains(line, "replicas:") {
line = regexp.MustCompile("replicas: .*$").ReplaceAllString(line, "replicas: {{ .Values."+serviceName+".replicas }}")
content[i] = line
}
}
return []byte(strings.Join(content, "\n")), nil
}
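// Filename returns the filename of the deployment.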
func (d *Deployment) Filename() string {
return d.service.Name + ".deployment.yaml"
}

18
generator/doc.go Normal file
View File

@@ -0,0 +1,18 @@
/*
The generator package is the core of katenary. It is responsible for generating Kubernetes objects from a compose file and transforming them into a Helm chart.
The conversion manipulates the YAML representation of the Kubernetes objects to add conditions, labels, annotations, etc., and it also creates the values to be written to
the values.yaml file.
generator.Convert() creates a HelmChart object and calls the Generate() function to convert a compose file to a Helm chart.
It then saves the Helm chart in the given directory.
If you want to change or override the write behavior, you can call Generate() directly and implement your own write function. Generate() returns
the Helm chart object containing all Kubernetes objects and Helm chart information; it does not write the chart to disk.
TODO: Manage cronjob + rbac
TODO: create note.txt
TODO: manage emptyDirs
*/
package generator
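As a rough illustration of the workflow described above, the sketch below parses a compose file and builds the chart in memory without writing anything to disk. It is an assumption-laden example: it supposes the module path `katenary`, that `parser.Parse` accepts a nil profile list, and that it is run from the directory containing the compose file (as Convert() is after its chdir).

```go
package main

import (
	"fmt"
	"log"

	"katenary/generator"
	"katenary/parser"
)

func main() {
	// Parse the compose file(s); no profiles are selected here.
	project, err := parser.Parse(nil, "docker-compose.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// Build the HelmChart object in memory; nothing is written to disk.
	chart, err := generator.Generate(project)
	if err != nil {
		log.Fatal(err)
	}
	// The caller decides how to write the templates, values and helpers.
	for name := range chart.Templates {
		fmt.Println("template:", name)
	}
}
```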

View File

@@ -1,154 +0,0 @@
package generator
import (
"fmt"
"io/ioutil"
"katenary/compose"
"katenary/helm"
"katenary/logger"
"katenary/tools"
"os"
"path/filepath"
"strings"
"github.com/compose-spec/compose-go/types"
"gopkg.in/yaml.v3"
)
// applyEnvMapLabel will get all LABEL_MAP_ENV to rebuild the env map with tpl.
func applyEnvMapLabel(s *types.ServiceConfig, c *helm.Container) {
locker.Lock()
defer locker.Unlock()
mapenv, ok := s.Labels[helm.LABEL_MAP_ENV]
if !ok {
return
}
// the mapenv is a YAML string
var envmap map[string]EnvVal
err := yaml.Unmarshal([]byte(mapenv), &envmap)
if err != nil {
logger.ActivateColors = true
logger.Red(err.Error())
logger.ActivateColors = false
return
}
// add in envmap
for k, v := range envmap {
vstring := fmt.Sprintf("%v", v)
s.Environment[k] = &vstring
touched := false
if c.Env != nil {
c.Env = make([]*helm.Value, 0)
}
for _, env := range c.Env {
if env.Name == k {
env.Value = v
touched = true
}
}
if !touched {
c.Env = append(c.Env, &helm.Value{Name: k, Value: v})
}
}
}
// readEnvFile read environment file and add to the values.yaml map.
func readEnvFile(envfilename string) map[string]EnvVal {
env := make(map[string]EnvVal)
content, err := ioutil.ReadFile(envfilename)
if err != nil {
logger.ActivateColors = true
logger.Red(err.Error())
logger.ActivateColors = false
os.Exit(2)
}
// each value is on a separate line with KEY=value
lines := strings.Split(string(content), "\n")
for _, line := range lines {
if strings.Contains(line, "=") {
kv := strings.SplitN(line, "=", 2)
env[kv[0]] = kv[1]
}
}
return env
}
// prepareEnvFromFiles generate configMap or secrets from environment files.
func prepareEnvFromFiles(name string, s *types.ServiceConfig, container *helm.Container, fileGeneratorChan HelmFileGenerator) {
// prepare secrets
secretsFiles := make([]string, 0)
if v, ok := s.Labels[helm.LABEL_ENV_SECRET]; ok {
secretsFiles = strings.Split(v, ",")
}
var secretVars []string
if v, ok := s.Labels[helm.LABEL_SECRETVARS]; ok {
secretVars = strings.Split(v, ",")
}
for i, s := range secretVars {
secretVars[i] = strings.TrimSpace(s)
}
// manage environment files (env_file in compose)
for _, envfile := range s.EnvFile {
f := tools.PathToName(envfile)
f = strings.ReplaceAll(f, ".env", "")
isSecret := false
for _, s := range secretsFiles {
s = strings.TrimSpace(s)
if s == envfile {
isSecret = true
}
}
var store helm.InlineConfig
if !isSecret {
logger.Bluef(ICON_CONF+" Generating configMap from %s\n", envfile)
store = helm.NewConfigMap(name, envfile)
} else {
logger.Bluef(ICON_SECRET+" Generating secret from %s\n", envfile)
store = helm.NewSecret(name, envfile)
}
envfile = filepath.Join(compose.GetCurrentDir(), envfile)
if err := store.AddEnvFile(envfile, secretVars); err != nil {
logger.ActivateColors = true
logger.Red(err.Error())
logger.ActivateColors = false
os.Exit(2)
}
section := "configMapRef"
if isSecret {
section = "secretRef"
}
container.EnvFrom = append(container.EnvFrom, map[string]map[string]string{
section: {
"name": store.Metadata().Name,
},
})
// read the envfile and remove them from the container environment or secret
envs := readEnvFile(envfile)
for varname := range envs {
if !isSecret {
// remove varname from container
for i, s := range container.Env {
if s.Name == varname {
container.Env = append(container.Env[:i], container.Env[i+1:]...)
i--
}
}
}
}
if store != nil {
fileGeneratorChan <- store.(HelmFile)
}
}
}

View File

@@ -0,0 +1,2 @@
/* Package extrafiles provides functions to generate the chart files that are not Kubernetes objects, like README.md and NOTES.txt. */
package extrafiles
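A minimal sketch of using this package directly (the chart name, description and values below are made up; the real call sites are in Convert()):

```go
package main

import (
	"fmt"

	"katenary/generator/extrafiles"
)

func main() {
	// Hypothetical values tree, as it would appear in values.yaml.
	values := map[string]any{
		"webapp": map[string]any{
			"replicas": 1,
			"repository": map[string]any{
				"image": "nginx",
				"tag":   "latest",
			},
		},
	}
	readme := extrafiles.ReadMeFile("myapp", "A demo chart", values)
	notes := extrafiles.NotesFile()
	fmt.Println(readme)
	fmt.Println(notes)
}
```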

View File

@@ -0,0 +1,11 @@
package extrafiles
import _ "embed"
//go:embed notes.tpl
var notesTemplate string
// NotesFile returns the content of the NOTES.txt file.
func NotesFile() string {
return notesTemplate
}

View File

@@ -0,0 +1,27 @@
Your release is named {{ .Release.Name }}.
To learn more about the release, try:
$ helm -n {{ .Release.Namespace }} status {{ .Release.Name }}
$ helm -n {{ .Release.Namespace }} get all {{ .Release.Name }}
To delete the release, run:
$ helm -n {{ .Release.Namespace }} delete {{ .Release.Name }}
You can see this notes again by running:
$ helm -n {{ .Release.Namespace }} get notes {{ .Release.Name }}
{{- $count := 0 -}}
{{- range $s, $v := .Values -}}
{{- if and $v $v.ingress -}}
{{- $count = add $count 1 -}}
{{- if eq $count 1 }}
The ingress list is:
{{ end }}
- {{ $s }}: http://{{ $v.ingress.host }}{{ $v.ingress.path }}
{{- end -}}
{{ end -}}

View File

@@ -0,0 +1,99 @@
package extrafiles
import (
"bytes"
"fmt"
"sort"
"strings"
"text/template"
_ "embed"
"gopkg.in/yaml.v3"
)
type chart struct {
Name string
Description string
Values []string
}
//go:embed readme.tpl
var readmeTemplate string
// ReadMeFile returns the content of the README.md file.
func ReadMeFile(charname, description string, values map[string]any) string {
// values is a yaml structure with keys and structured values...
// we want to make a list of dot-separated keys and their values
vv := map[string]any{}
out, _ := yaml.Marshal(values)
yaml.Unmarshal(out, &vv)
result := make(map[string]string)
parseValues("", vv, result)
funcMap := template.FuncMap{
"repeat": func(s string, count int) string {
return strings.Repeat(s, count)
},
}
tpl, err := template.New("readme").Funcs(funcMap).Parse(readmeTemplate)
if err != nil {
panic(err)
}
valuesLines := []string{}
maxParamLen := 0
maxDefaultLen := 0
for key, value := range result {
if len(key) > maxParamLen {
maxParamLen = len(key)
}
if len(value) > maxDefaultLen {
maxDefaultLen = len(value)
}
}
for key, value := range result {
valuesLines = append(valuesLines, fmt.Sprintf("| %-*s | %-*s |", maxParamLen, key, maxDefaultLen, value))
}
sort.Strings(valuesLines)
buf := &bytes.Buffer{}
err = tpl.Execute(buf, map[string]any{
"DescrptionPadding": maxParamLen,
"DefaultPadding": maxDefaultLen,
"Chart": chart{
Name: charname,
Description: description,
Values: valuesLines,
},
})
if err != nil {
panic(err)
}
return buf.String()
}
func parseValues(prefix string, values map[string]interface{}, result map[string]string) {
for key, value := range values {
path := key
if prefix != "" {
path = prefix + "." + key
}
switch v := value.(type) {
case []interface{}:
for i, u := range v {
parseValues(fmt.Sprintf("%s[%d]", path, i), map[string]interface{}{"value": u}, result)
}
case map[string]interface{}:
parseValues(path, v, result)
default:
strValue := fmt.Sprintf("`%v`", value)
result["`"+path+"`"] = strValue
}
}
}
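To show what parseValues produces, here is a small in-package sketch (the input map is made up): nested maps become backticked dot-separated keys, and list items are flattened with an index plus a "value" suffix.

```go
package extrafiles

import "fmt"

// sketchParseValues is illustrative only; it is not part of the generator.
func sketchParseValues() {
	values := map[string]interface{}{
		"web": map[string]interface{}{
			"replicas": 2,
			"ports":    []interface{}{80, 443},
		},
	}
	result := map[string]string{}
	parseValues("", values, result)
	fmt.Println(result["`web.replicas`"])       // prints: `2`
	fmt.Println(result["`web.ports[0].value`"]) // prints: `80`
}
```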

View File

@@ -0,0 +1,32 @@
# {{ .Chart.Name }}
{{ .Chart.Description }}
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
# Standard Helm install
$ helm install my-release {{ .Chart.Name }}
# To use a custom namespace and force the creation of the namespace
$ helm install my-release --namespace my-namespace --create-namespace {{ .Chart.Name }}
# To use a custom values file
$ helm install my-release -f my-values.yaml {{ .Chart.Name }}
```
See the [Helm documentation](https://helm.sh/docs/intro/using_helm/) for more information on installing and managing the chart.
## Configuration
The following table lists the configurable parameters of the {{ .Chart.Name }} chart and their default values.
| {{ printf "%-*s" .DescrptionPadding "Parameter" }} | {{ printf "%-*s" .DefaultPadding "Default" }} |
| {{ repeat "-" .DescrptionPadding }} | {{ repeat "-" .DefaultPadding }} |
{{- range .Chart.Values }}
{{ . }}
{{- end }}

658
generator/generator.go Normal file
View File

@@ -0,0 +1,658 @@
package generator
// TODO: configmap from files 20%
import (
"bytes"
"fmt"
"katenary/utils"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/compose-spec/compose-go/types"
goyaml "gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/yaml"
)
// Generate a chart from a compose project.
// This does not write files to disk, it only creates the HelmChart object.
//
// The Generate function will create the HelmChart object this way:
//
// 1. Detect the service port name or leave the port number if not found.
//
// 2. Create a deployment for each service that is not ignored.
//
// 3. Create a service and ingresses for each service that has ports and/or declared ingresses.
//
// 4. Create PVC or ConfigMap volumes for each volume.
//
// 5. Create init containers for each service that has dependencies on other services.
//
// 6. Create the chart dependencies.
//
// 7. Create a configmap and secrets from the environment variables.
//
// 8. Merge the same-pod services.
func Generate(project *types.Project) (*HelmChart, error) {
var (
appName = project.Name
deployments = make(map[string]*Deployment, len(project.Services))
services = make(map[string]*Service)
podToMerge = make(map[string]*Deployment)
)
chart := NewChart(appName)
// Add the compose files hash to the chart annotations
hash, err := utils.HashComposefiles(project.ComposeFiles)
if err != nil {
return nil, err
}
Annotations[KATENARY_PREFIX+"compose-hash"] = hash
chart.composeHash = &hash
// find the "main-app" label, and set chart.AppVersion to the tag if exists
mainCount := 0
for _, service := range project.Services {
if serviceIsMain(service) {
log.Printf("Found main app %s", service.Name)
mainCount++
if mainCount > 1 {
return nil, fmt.Errorf("found more than one main app")
}
setChartVersion(chart, service)
}
}
if mainCount == 0 {
chart.AppVersion = "0.1.0"
}
// first pass: create all deployments, whatever they are.
for _, service := range project.Services {
// check the "ports" label from container and add it to the service
if err := fixPorts(&service); err != nil {
return nil, err
}
// ignored service
if isIgnored(service) {
fmt.Printf("%s Ignoring service %s\n", utils.IconInfo, service.Name)
continue
}
// helm dependency
if isHelmDependency, err := setDependencies(chart, service); err != nil {
return nil, err
} else if isHelmDependency {
continue
}
// create all deployments
d := NewDeployment(service, chart)
deployments[service.Name] = d
// generate the cronjob if needed
setCronJob(service, chart, appName)
// get the same-pod label if it exists and add it to the list.
// We will later copy some parts to the target deployment and remove this one.
if samePod, ok := service.Labels[LABEL_SAME_POD]; ok && samePod != "" {
podToMerge[samePod] = d
}
// create the needed service for the container port
if len(service.Ports) > 0 {
s := NewService(service, appName)
services[service.Name] = s
}
// create all ingresses
if ingress := d.AddIngress(service, appName); ingress != nil {
y, _ := ingress.Yaml()
chart.Templates[ingress.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
}
}
// now we have all deployments, we can create PVC if needed (it's separated from
// the above loop because we need all deployments to not duplicate PVC for "same-pod" services)
for _, service := range project.Services {
if err := buildVolumes(service, chart, deployments); err != nil {
return nil, err
}
}
// drop all "same-pod" deployments because the containers and volumes are already
// in the target deployment
for _, service := range project.Services {
if samepod, ok := service.Labels[LABEL_SAME_POD]; ok && samepod != "" {
// move this deployment volumes to the target deployment
if target, ok := deployments[samepod]; ok {
target.AddContainer(service)
target.BindFrom(service, deployments[service.Name])
delete(deployments, service.Name)
} else {
log.Printf("service %[1]s is declared as %[2]s, but %[2]s is not defined", service.Name, LABEL_SAME_POD)
}
}
}
// create init containers for all DependsOn
for _, s := range project.Services {
for _, d := range s.GetDependencies() {
if dep, ok := deployments[d]; ok {
deployments[s.Name].DependsOn(dep)
} else {
log.Printf("service %[1]s depends on %[2]s, but %[2]s is not defined", s.Name, d)
}
}
}
// generate configmaps with environment variables
generateConfigMapsAndSecrets(project, chart)
// if the env-from label is set, we need to add the env vars from the configmap
// to the environment of the service
for _, s := range project.Services {
setSharedConf(s, chart, deployments)
}
// generate yaml files
for _, d := range deployments {
y, _ := d.Yaml()
chart.Templates[d.Filename()] = &ChartTemplate{
Content: y,
Servicename: d.service.Name,
}
}
// generate all services
for _, s := range services {
y, _ := s.Yaml()
chart.Templates[s.Filename()] = &ChartTemplate{
Content: y,
Servicename: s.service.Name,
}
}
// compute all needed replacements in YAML templates
for n, v := range chart.Templates {
v.Content = removeReplaceString(v.Content)
v.Content = computeNIndent(v.Content)
chart.Templates[n].Content = v.Content
}
// generate helper
chart.Helper = Helper(appName)
return chart, nil
}
// computeNIndent replaces all __indent__ labels with the number of spaces before the label.
func computeNIndent(b []byte) []byte {
lines := bytes.Split(b, []byte("\n"))
for i, line := range lines {
if !bytes.Contains(line, []byte("__indent__")) {
continue
}
startSpaces := ""
spaces := regexp.MustCompile(`^\s+`).FindAllString(string(line), -1)
if len(spaces) > 0 {
startSpaces = spaces[0]
}
line = []byte(startSpaces + strings.TrimLeft(string(line), " "))
line = bytes.ReplaceAll(line, []byte("__indent__"), []byte(fmt.Sprintf("%d", len(startSpaces))))
lines[i] = line
}
return bytes.Join(lines, []byte("\n"))
}
// removeReplaceString replaces all __replace_ labels with the value of the
// capture group and removes all new lines and repeated spaces.
//
// we created:
//
// __replace_bar: '{{ include "foo.labels" .
// }}'
//
// note the new line and spaces...
//
// we now want to replace it with {{ include "foo.labels" . }}, without the label name.
func removeReplaceString(b []byte) []byte {
// replace all matches with the value of the capture group
// and remove all new lines and repeated spaces
b = replaceLabelRegexp.ReplaceAllFunc(b, func(b []byte) []byte {
inc := replaceLabelRegexp.FindSubmatch(b)[1]
inc = bytes.ReplaceAll(inc, []byte("\n"), []byte(""))
inc = bytes.ReplaceAll(inc, []byte("\r"), []byte(""))
inc = regexp.MustCompile(`\s+`).ReplaceAll(inc, []byte(" "))
return inc
})
return b
}
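// serviceIsMain returns true when the service carries the main-app label set to a truthy value.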
func serviceIsMain(service types.ServiceConfig) bool {
if main, ok := service.Labels[LABEL_MAIN_APP]; ok {
return main == "true" || main == "yes" || main == "1"
}
return false
}
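// setChartVersion sets the chart AppVersion from the image tag of the given service if no version is already set, falling back to 0.1.0 when the image has no tag.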
func setChartVersion(chart *HelmChart, service types.ServiceConfig) {
if chart.Version == "" {
image := service.Image
parts := strings.Split(image, ":")
if len(parts) > 1 {
chart.AppVersion = parts[1]
} else {
chart.AppVersion = "0.1.0"
}
}
}
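// fixPorts appends the ports declared in the "ports" label (a YAML list or a comma-separated string) to the service port list.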
func fixPorts(service *types.ServiceConfig) error {
// check the "ports" label from container and add it to the service
if portsLabel, ok := service.Labels[LABEL_PORTS]; ok {
ports := []uint32{}
if err := goyaml.Unmarshal([]byte(portsLabel), &ports); err != nil {
// maybe it's a string, comma separated
parts := strings.Split(portsLabel, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "" {
continue
}
port, err := strconv.ParseUint(part, 10, 32)
if err != nil {
return err
}
ports = append(ports, uint32(port))
}
}
for _, port := range ports {
service.Ports = append(service.Ports, types.ServicePortConfig{
Target: port,
})
}
}
return nil
}
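// setCronJob creates a CronJob from the "cronjob" label and registers its manifest, together with the RBAC objects it needs, in the chart templates. It returns nil when the label is not set.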
func setCronJob(service types.ServiceConfig, chart *HelmChart, appName string) *CronJob {
if _, ok := service.Labels[LABEL_CRONJOB]; !ok {
return nil
}
cronjob, rbac := NewCronJob(service, chart, appName)
y, _ := cronjob.Yaml()
chart.Templates[cronjob.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
if rbac != nil {
y, _ := rbac.RoleBinding.Yaml()
chart.Templates[rbac.RoleBinding.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
y, _ = rbac.Role.Yaml()
chart.Templates[rbac.Role.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
y, _ = rbac.ServiceAccount.Yaml()
chart.Templates[rbac.ServiceAccount.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
}
return cronjob
}
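// setDependencies registers the Helm dependency declared in the "dependencies" label and stores its values under its name (or alias) in values.yaml. It reports whether the service was handled as a dependency.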
func setDependencies(chart *HelmChart, service types.ServiceConfig) (bool, error) {
// helm dependency
if v, ok := service.Labels[LABEL_DEPENDENCIES]; ok {
d := Dependency{}
if err := yaml.Unmarshal([]byte(v), &d); err != nil {
return false, err
}
fmt.Printf("%s Adding dependency to %s\n", utils.IconDependency, d.Name)
chart.Dependencies = append(chart.Dependencies, d)
name := d.Name
if d.Alias != "" {
name = d.Alias
}
// add the dependency env vars to the values.yaml
chart.Values[name] = d.Values
return true, nil
}
return false, nil
}
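// isIgnored returns true when the service carries the "ignore" label set to a truthy value.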
func isIgnored(service types.ServiceConfig) bool {
if v, ok := service.Labels[LABEL_IGNORE]; ok {
return v == "true" || v == "yes" || v == "1"
}
return false
}
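// buildVolumes creates the PersistentVolumeClaims for named volumes and the ConfigMaps for bind mounts listed in the "configmap-files" label, skipping volumes already provided by a "same-pod" target deployment.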
func buildVolumes(service types.ServiceConfig, chart *HelmChart, deployments map[string]*Deployment) error {
appName := chart.Name
for _, v := range service.Volumes {
// Do not add volumes if the pod is injected into another deployment
// via "same-pod" and the volume already exists in the destination deployment
if samePodVolume(service, v, deployments) {
continue
}
switch v.Type {
case "volume":
pvc := NewVolumeClaim(service, v.Source, appName)
// if the service is integrated in another deployment, we need to add the volume
// to the target deployment
if override, ok := service.Labels[LABEL_SAME_POD]; ok {
pvc.nameOverride = override
pvc.PersistentVolumeClaim.Spec.StorageClassName = utils.StrPtr(`{{ .Values.` + override + `.persistence.` + v.Source + `.storageClass }}`)
chart.Values[override].(*Value).AddPersistence(v.Source)
}
y, _ := pvc.Yaml()
chart.Templates[pvc.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name, //TODO, use name
}
case "bind":
// ensure the path is in labels
bindPath := map[string]string{}
if _, ok := service.Labels[LABEL_CM_FILES]; ok {
files := []string{}
if err := yaml.Unmarshal([]byte(service.Labels[LABEL_CM_FILES]), &files); err != nil {
return err
}
for _, f := range files {
bindPath[f] = f
}
}
if _, ok := bindPath[v.Source]; !ok {
continue
}
cm := NewConfigMapFromFiles(service, appName, v.Source)
var err error
var y []byte
if y, err = cm.Yaml(); err != nil {
log.Fatal(err)
}
chart.Templates[cm.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
// continue with subdirectories
stat, err := os.Stat(v.Source)
if err != nil {
return err
}
if stat.IsDir() {
files, err := filepath.Glob(filepath.Join(v.Source, "*"))
if err != nil {
return err
}
for _, f := range files {
if f == v.Source {
continue
}
if stat, err := os.Stat(f); err != nil || !stat.IsDir() {
continue
}
cm := NewConfigMapFromFiles(service, appName, f)
var err error
var y []byte
if y, err = cm.Yaml(); err != nil {
log.Fatal(err)
}
log.Printf("Adding configmap %s %s", cm.Filename(), f)
chart.Templates[cm.Filename()] = &ChartTemplate{
Content: y,
Servicename: service.Name,
}
}
}
}
}
return nil
}
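// generateConfigMapsAndSecrets creates, for each service, a Secret holding the environment variables listed in the "secrets" label and a ConfigMap holding the remaining variables.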
func generateConfigMapsAndSecrets(project *types.Project, chart *HelmChart) error {
appName := chart.Name
for _, s := range project.Services {
if s.Environment == nil || len(s.Environment) == 0 {
continue
}
originalEnv := types.MappingWithEquals{}
secretsVar := types.MappingWithEquals{}
// copy env to originalEnv
for k, v := range s.Environment {
originalEnv[k] = v
}
if v, ok := s.Labels[LABEL_SECRETS]; ok {
list := []string{}
if err := yaml.Unmarshal([]byte(v), &list); err != nil {
log.Fatal("error unmarshaling secrets label:", err)
}
for _, secret := range list {
if secret == "" {
continue
}
if _, ok := s.Environment[secret]; !ok {
fmt.Printf("%s secret %s not found in environment", utils.IconWarning, secret)
continue
}
secretsVar[secret] = s.Environment[secret]
}
}
if len(secretsVar) > 0 {
s.Environment = secretsVar
sec := NewSecret(s, appName)
y, _ := sec.Yaml()
name := sec.service.Name
chart.Templates[name+".secret.yaml"] = &ChartTemplate{
Content: y,
Servicename: s.Name,
}
}
// remove secrets from env
s.Environment = originalEnv // back to original
for k := range secretsVar {
delete(s.Environment, k)
}
if len(s.Environment) > 0 {
cm := NewConfigMap(s, appName)
y, _ := cm.Yaml()
name := cm.service.Name
chart.Templates[name+".configmap.yaml"] = &ChartTemplate{
Content: y,
Servicename: s.Name,
}
}
}
return nil
}
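// mergePods moves the containers, init containers and volumes of a "same-pod" deployment into its target deployment, merges the exposed ports into the target service and removes the now redundant manifests from the chart.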
func mergePods(target, from *Deployment, services map[string]*Service, chart *HelmChart) {
targetName := target.service.Name
fromName := from.service.Name
// copy the volumes from the source deployment
for _, v := range from.Spec.Template.Spec.Volumes {
// ensure that the volume is not already present
found := false
for _, tv := range target.Spec.Template.Spec.Volumes {
if tv.Name == v.Name {
found = true
break
}
}
if found {
continue
}
target.Spec.Template.Spec.Volumes = append(target.Spec.Template.Spec.Volumes, v)
}
// copy the containers from the source deployment
for _, c := range from.Spec.Template.Spec.Containers {
target.Spec.Template.Spec.Containers = append(target.Spec.Template.Spec.Containers, c)
}
// copy the init containers from the source deployment
for _, c := range from.Spec.Template.Spec.InitContainers {
target.Spec.Template.Spec.InitContainers = append(target.Spec.Template.Spec.InitContainers, c)
}
// drop the deployment from the chart
delete(chart.Templates, fromName+".deployment.yaml")
// rewrite the target deployment
y, err := target.Yaml()
if err != nil {
log.Fatal("error rewriting deployment:", err)
}
chart.Templates[target.Filename()] = &ChartTemplate{
Content: y,
Servicename: targetName,
}
// now, if the source deployment has a service, we need to merge it with the target service
if _, ok := chart.Templates[targetName+".service.yaml"]; ok {
container, _ := utils.GetContainerByName(fromName, target.Spec.Template.Spec.Containers)
if container.Ports == nil || len(container.Ports) == 0 {
return
}
targetService := services[targetName]
for _, port := range container.Ports {
targetService.AddPort(types.ServicePortConfig{
Target: uint32(port.ContainerPort),
Protocol: "TCP",
}, port.Name)
}
// rewrite the target service
y, _ := targetService.Yaml()
chart.Templates[targetName+".service.yaml"] = &ChartTemplate{
Content: y,
Servicename: target.service.Name,
}
// and remove the source service from the chart
delete(chart.Templates, fromName+".service.yaml")
// In Values, remove the "replicas" key from the source service
if v, ok := chart.Values[fromName]; ok {
// if v is a Value
if v, ok := v.(*Value); ok {
v.Replicas = nil
}
}
}
}
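// samePodVolume reports whether the volume of a "same-pod" service is already declared in the target deployment, in which case it must not be created again.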
func samePodVolume(service types.ServiceConfig, v types.ServiceVolumeConfig, deployments map[string]*Deployment) bool {
// if the service has volumes, and it has "same-pod" label
// - get the target deployment
// - check if it has the same volume
// if not, return false
if v.Source == "" {
return false
}
if service.Volumes == nil || len(service.Volumes) == 0 {
return false
}
targetDeployment := ""
if targetName, ok := service.Labels[LABEL_SAME_POD]; !ok {
return false
} else {
targetDeployment = targetName
}
// get the target deployment
var target *Deployment
for _, d := range deployments {
if d.service.Name == targetDeployment {
target = d
break
}
}
if target == nil {
return false
}
// check if it has the same volume
for _, tv := range target.Spec.Template.Spec.Volumes {
if tv.Name == v.Source {
log.Printf("found same pod volume %s in deployment %s and %s", tv.Name, service.Name, targetDeployment)
return true
}
}
return false
}
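// setSharedConf adds envFrom references to the configmaps of the services listed in the "env-from" label to the matching container of the service deployment.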
func setSharedConf(service types.ServiceConfig, chart *HelmChart, deployments map[string]*Deployment) {
// if the service has the "shared-conf" label, we need to add the configmap
// to the chart and add the env vars to the service
if _, ok := service.Labels[LABEL_ENV_FROM]; !ok {
return
}
fromservices := []string{}
if err := yaml.Unmarshal([]byte(service.Labels[LABEL_ENV_FROM]), &fromservices); err != nil {
log.Fatal("error unmarshaling env-from label:", err)
}
// find the configmap in the chart templates
for _, fromservice := range fromservices {
if _, ok := chart.Templates[fromservice+".configmap.yaml"]; !ok {
log.Printf("configmap %s not found in chart templates", fromservice)
continue
}
// find the corresponding target deployment
var target *Deployment
for _, d := range deployments {
if d.service.Name == service.Name {
target = d
break
}
}
if target == nil {
continue
}
// add the configmap to the service
for i, c := range target.Spec.Template.Spec.Containers {
if c.Name != service.Name {
continue
}
c.EnvFrom = append(c.EnvFrom, corev1.EnvFromSource{
ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: utils.TplName(fromservice, chart.Name),
},
},
})
target.Spec.Template.Spec.Containers[i] = c
}
}
}

19
generator/globals.go Normal file
View File

@@ -0,0 +1,19 @@
package generator
import "regexp"
var (
// regexp to find all tpl strings
tplValueRegexp = regexp.MustCompile(`\{\{.*\}\}-`)
// find all labels starting with __replace_ and ending with ":"
// and get the value between the quotes
// (?s) => dot also matches newlines
// (?P<inc>.+?) => named capture group to "inc" variable (so we could use $inc in the replace)
replaceLabelRegexp = regexp.MustCompile(`(?s)__replace_.+?: '(?P<inc>.+?)'`)
// Standard annotations
Annotations = map[string]string{
KATENARY_PREFIX + "version": Version,
}
)

36
generator/helmHelper.tpl Normal file
View File

@@ -0,0 +1,36 @@
{{- define "__APP__.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "__APP__.name" -}}
{{- if .Values.nameOverride -}}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "__APP__.labels" -}}
{{ include "__APP__.selectorLabels" .}}
{{ if .Chart.Version -}}
{{ printf "__PREFIX__chart-version: %s" .Chart.Version }}
{{- end }}
{{ if .Chart.AppVersion -}}
{{ printf "__PREFIX__app-version: %s" .Chart.AppVersion }}
{{- end }}
{{- end -}}
{{- define "__APP__.selectorLabels" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{ printf "__PREFIX__name: %s" $name }}
{{ printf "__PREFIX__instance: %s" .Release.Name }}
{{- end -}}

19
generator/helper.go Normal file
View File

@@ -0,0 +1,19 @@
package generator
import (
_ "embed"
"strings"
)
// helmHelper is a template for the _helpers.tpl file in the chart templates directory.
//
//go:embed helmHelper.tpl
var helmHelper string
// Helper returns the _helpers.tpl file for a chart.
func Helper(name string) string {
helmHelper := strings.ReplaceAll(helmHelper, "__APP__", name)
helmHelper = strings.ReplaceAll(helmHelper, "__PREFIX__", KATENARY_PREFIX)
helmHelper = strings.ReplaceAll(helmHelper, "__VERSION__", "0.1.0")
return helmHelper
}

175
generator/ingress.go Normal file
View File

@@ -0,0 +1,175 @@
package generator
import (
"katenary/utils"
"log"
"strings"
"github.com/compose-spec/compose-go/types"
goyaml "gopkg.in/yaml.v3"
networkv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
var _ Yaml = (*Ingress)(nil)
type Ingress struct {
*networkv1.Ingress
service *types.ServiceConfig `yaml:"-"`
}
// NewIngress creates a new Ingress from a compose service.
func NewIngress(service types.ServiceConfig, Chart *HelmChart) *Ingress {
appName := Chart.Name
// parse the KATENARY_PREFIX/ingress label from the service
if service.Labels == nil {
service.Labels = make(map[string]string)
}
var label string
var ok bool
if label, ok = service.Labels[LABEL_INGRESS]; !ok {
return nil
}
mapping := map[string]interface{}{
"enabled": false,
"host": service.Name + ".tld",
"path": "/",
"class": "-",
}
if err := goyaml.Unmarshal([]byte(label), &mapping); err != nil {
log.Fatalf("Failed to parse ingress label: %s\n", err)
}
// create the ingress
pathType := networkv1.PathTypeImplementationSpecific
serviceName := `{{ include "` + appName + `.fullname" . }}-` + service.Name
if v, ok := mapping["port"]; ok {
if port, ok := v.(int); ok {
mapping["port"] = int32(port)
}
} else {
log.Fatalf("No port provided for ingress target in service %s\n", service.Name)
}
// Add the ingress host to the values.yaml
if Chart.Values[service.Name] == nil {
Chart.Values[service.Name] = &Value{}
}
Chart.Values[service.Name].(*Value).Ingress = &IngressValue{
Enabled: mapping["enabled"].(bool),
Path: mapping["path"].(string),
Host: mapping["host"].(string),
Class: mapping["class"].(string),
Annotations: map[string]string{},
}
//ingressClassName := `{{ .Values.` + service.Name + `.ingress.class }}`
ingressClassName := utils.TplValue(service.Name, "ingress.class")
servicePortName := utils.GetServiceNameByPort(int(mapping["port"].(int32)))
ingressService := &networkv1.IngressServiceBackend{
Name: serviceName,
Port: networkv1.ServiceBackendPort{},
}
if servicePortName != "" {
ingressService.Port.Name = servicePortName
} else {
ingressService.Port.Number = mapping["port"].(int32)
}
ing := &Ingress{
service: &service,
Ingress: &networkv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
APIVersion: "networking.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Spec: networkv1.IngressSpec{
IngressClassName: &ingressClassName,
Rules: []networkv1.IngressRule{
{
Host: utils.TplValue(service.Name, "ingress.host"),
IngressRuleValue: networkv1.IngressRuleValue{
HTTP: &networkv1.HTTPIngressRuleValue{
Paths: []networkv1.HTTPIngressPath{
{
Path: utils.TplValue(service.Name, "ingress.path"),
PathType: &pathType,
Backend: networkv1.IngressBackend{
Service: ingressService,
},
},
},
},
},
},
},
TLS: []networkv1.IngressTLS{
{
Hosts: []string{
`{{ tpl .Values.` + service.Name + `.ingress.host . }}`,
},
SecretName: `{{ include "` + appName + `.fullname" . }}-` + service.Name + `-tls`,
},
},
},
},
}
return ing
}
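// Yaml marshals the ingress and post-processes the result: the manifest is wrapped in the ingress.enabled condition, annotations from values.yaml are injected above the labels, and the ingressClassName line is made conditional on the class value.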
func (ingress *Ingress) Yaml() ([]byte, error) {
serviceName := ingress.service.Name
ret, err := yaml.Marshal(ingress)
if err != nil {
return nil, err
}
lines := strings.Split(string(ret), "\n")
out := []string{
`{{- if .Values.` + serviceName + `.ingress.enabled -}}`,
}
for _, line := range lines {
if strings.Contains(line, "loadBalancer: ") {
continue
}
if strings.Contains(line, "labels:") {
// add annotations above labels from values.yaml
content := `` +
` {{- if .Values.` + serviceName + `.ingress.annotations -}}` + "\n" +
` {{- toYaml .Values.` + serviceName + `.ingress.annotations | nindent 4 }}` + "\n" +
` {{- end }}` + "\n" +
line
out = append(out, content)
} else if strings.Contains(line, "ingressClassName: ") {
content := utils.Wrap(
line,
`{{- if ne .Values.`+serviceName+`.ingress.class "-" }}`,
`{{- end }}`,
)
out = append(out, content)
} else {
out = append(out, line)
}
}
out = append(out, `{{- end -}}`)
ret = []byte(strings.Join(out, "\n"))
return ret, nil
}
func (ingress *Ingress) Filename() string {
return ingress.service.Name + ".ingress.yaml"
}

229
generator/katenaryLabels.go Normal file
View File

@@ -0,0 +1,229 @@
package generator
import (
"bytes"
_ "embed"
"fmt"
"katenary/utils"
"regexp"
"sort"
"strings"
"text/tabwriter"
"text/template"
"sigs.k8s.io/yaml"
)
var (
// Set the documentation of labels here
//
//go:embed katenaryLabelsDoc.yaml
labelFullHelpYAML []byte
// parsed yaml
labelFullHelp map[string]Help
)
// Label is a katenary label to find in compose files.
type Label = string
// Help is the documentation of a label.
type Help struct {
Short string `yaml:"short"`
Long string `yaml:"long"`
Example string `yaml:"example"`
Type string `yaml:"type"`
}
const KATENARY_PREFIX = "katenary.v3/"
// Known labels.
const (
LABEL_MAIN_APP Label = KATENARY_PREFIX + "main-app"
LABEL_VALUES Label = KATENARY_PREFIX + "values"
LABEL_SECRETS Label = KATENARY_PREFIX + "secrets"
LABEL_PORTS Label = KATENARY_PREFIX + "ports"
LABEL_INGRESS Label = KATENARY_PREFIX + "ingress"
LABEL_MAP_ENV Label = KATENARY_PREFIX + "map-env"
LABEL_HEALTHCHECK Label = KATENARY_PREFIX + "health-check"
LABEL_SAME_POD Label = KATENARY_PREFIX + "same-pod"
LABEL_DESCRIPTION Label = KATENARY_PREFIX + "description"
LABEL_IGNORE Label = KATENARY_PREFIX + "ignore"
LABEL_DEPENDENCIES Label = KATENARY_PREFIX + "dependencies"
LABEL_CM_FILES Label = KATENARY_PREFIX + "configmap-files"
LABEL_CRONJOB Label = KATENARY_PREFIX + "cronjob"
LABEL_ENV_FROM Label = KATENARY_PREFIX + "env-from"
)
func init() {
if err := yaml.Unmarshal(labelFullHelpYAML, &labelFullHelp); err != nil {
panic(err)
}
}
// GetLabelHelp generates the help for the labels, as plain text or markdown.
func GetLabelHelp(asMarkdown bool) string {
names := GetLabelNames() // sorted
if !asMarkdown {
return generatePlainHelp(names)
}
return generateMarkdownHelp(names)
}
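// generatePlainHelp renders the label help as tab-aligned plain text for terminal output.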
func generatePlainHelp(names []string) string {
var builder strings.Builder
for _, name := range names {
help := labelFullHelp[name]
fmt.Fprintf(&builder, "%s%s:\t%s\t%s\n", KATENARY_PREFIX, name, help.Type, help.Short)
}
// use tabwriter to align the help text
buf := new(strings.Builder)
w := tabwriter.NewWriter(buf, 0, 8, 0, '\t', tabwriter.AlignRight)
fmt.Fprintln(w, builder.String())
w.Flush()
head := "To get more information about a label, use `katenary help-label <name_without_prefix>\ne.g. katenary help-label dependencies\n\n"
return head + buf.String()
}
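// generateMarkdownHelp renders the label help as a markdown table whose columns are padded to the longest entries.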
func generateMarkdownHelp(names []string) string {
var builder strings.Builder
var maxNameLength, maxDescriptionLength, maxTypeLength int
max := func(a, b int) int {
if a > b {
return a
}
return b
}
for _, name := range names {
help := labelFullHelp[name]
maxNameLength = max(maxNameLength, len(name)+2+len(KATENARY_PREFIX))
maxDescriptionLength = max(maxDescriptionLength, len(help.Short))
maxTypeLength = max(maxTypeLength, len(help.Type))
}
fmt.Fprintf(&builder, "%s\n", generateTableHeader(maxNameLength, maxDescriptionLength, maxTypeLength))
fmt.Fprintf(&builder, "%s\n", generateTableHeaderSeparator(maxNameLength, maxDescriptionLength, maxTypeLength))
for _, name := range names {
help := labelFullHelp[name]
fmt.Fprintf(&builder, "| %-*s | %-*s | %-*s |\n",
maxNameLength, "`"+KATENARY_PREFIX+name+"`", // enclose in backticks
maxDescriptionLength, help.Short,
maxTypeLength, help.Type,
)
}
return builder.String()
}
func generateTableHeader(maxNameLength, maxDescriptionLength, maxTypeLength int) string {
return fmt.Sprintf(
"| %-*s | %-*s | %-*s |",
maxNameLength, "Label name",
maxDescriptionLength, "Description",
maxTypeLength, "Type",
)
}
func generateTableHeaderSeparator(maxNameLength, maxDescriptionLength, maxTypeLength int) string {
return fmt.Sprintf(
"| %s | %s | %s |",
strings.Repeat("-", maxNameLength),
strings.Repeat("-", maxDescriptionLength),
strings.Repeat("-", maxTypeLength),
)
}
// GetLabelHelpFor returns the help for a specific label.
func GetLabelHelpFor(labelname string, asMarkdown bool) string {
help, ok := labelFullHelp[labelname]
if !ok {
return "No help available for " + labelname + "."
}
help.Long = strings.TrimPrefix(help.Long, "\n")
help.Example = strings.TrimPrefix(help.Example, "\n")
help.Short = strings.TrimPrefix(help.Short, "\n")
// get help template
helpTemplate := getHelpTemplate(asMarkdown)
if asMarkdown {
// enclose templates in backticks
help.Long = regexp.MustCompile(`\{\{(.*?)\}\}`).ReplaceAllString(help.Long, "`{{$1}}`")
help.Long = strings.ReplaceAll(help.Long, "__APP__", "`__APP__`")
} else {
help.Long = strings.ReplaceAll(help.Long, " \n", "\n")
help.Long = strings.ReplaceAll(help.Long, "`", "")
help.Long = strings.ReplaceAll(help.Long, "<code>", "")
help.Long = strings.ReplaceAll(help.Long, "</code>", "")
help.Long = utils.WordWrap(help.Long, 80)
}
var buf bytes.Buffer
template.Must(template.New("shorthelp").Parse(help.Long)).Execute(&buf, struct {
KATENARY_PREFIX string
}{
KATENARY_PREFIX: KATENARY_PREFIX,
})
help.Long = buf.String()
buf.Reset()
template.Must(template.New("example").Parse(help.Example)).Execute(&buf, struct {
KATENARY_PREFIX string
}{
KATENARY_PREFIX: KATENARY_PREFIX,
})
help.Example = buf.String()
buf.Reset()
template.Must(template.New("complete").Parse(helpTemplate)).Execute(&buf, struct {
Name string
Help Help
KATENARY_PREFIX string
}{
Name: labelname,
Help: help,
KATENARY_PREFIX: KATENARY_PREFIX,
})
return buf.String()
}
// GetLabelNames returns a sorted list of all katenary label names.
func GetLabelNames() []string {
var names []string
for name := range labelFullHelp {
names = append(names, name)
}
sort.Strings(names)
return names
}
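// getHelpTemplate returns the text/template used to render the help of a single label, in markdown or plain text.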
func getHelpTemplate(asMarkdown bool) string {
if asMarkdown {
return `## {{ .KATENARY_PREFIX }}{{ .Name }}
{{ .Help.Short }}
**Type**: ` + "`" + `{{ .Help.Type }}` + "`" + `
{{ .Help.Long }}
**Example:**` + "\n\n```yaml\n" + `{{ .Help.Example }}` + "\n```\n"
}
return `{{ .KATENARY_PREFIX }}{{ .Name }}: {{ .Help.Short }}
Type: {{ .Help.Type }}
{{ .Help.Long }}
Example:
{{ .Help.Example }}
`
}

36
generator/labels.go Normal file
View File

@@ -0,0 +1,36 @@
package generator
import (
"fmt"
)
// LabelType identifies the type of label to generate in objects.
// TODO: is this still needed?
type LabelType uint8
const (
DeploymentLabel LabelType = iota
ServiceLabel
)
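// GetLabels returns the labels to add to a generated object: the component label plus a placeholder that is later replaced by the chart "labels" helper include.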
func GetLabels(serviceName, appName string) map[string]string {
labels := map[string]string{
KATENARY_PREFIX + "component": serviceName,
}
key := `{{- include "%s.labels" . | nindent __indent__ }}`
labels[`__replace_`+serviceName] = fmt.Sprintf(key, appName)
return labels
}
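// GetMatchLabels returns the selector labels of a component: the component label plus a placeholder that is later replaced by the chart "selectorLabels" helper include.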
func GetMatchLabels(serviceName, appName string) map[string]string {
labels := map[string]string{
KATENARY_PREFIX + "component": serviceName,
}
key := `{{- include "%s.selectorLabels" . | nindent __indent__ }}`
labels[`__replace_`+serviceName] = fmt.Sprintf(key, appName)
return labels
}

View File

@@ -1,304 +0,0 @@
package generator
import (
"fmt"
"io/ioutil"
"katenary/helm"
"katenary/logger"
"katenary/tools"
"log"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"github.com/compose-spec/compose-go/types"
)
type EnvVal = helm.EnvValue
const (
ICON_PACKAGE = "📦"
ICON_SERVICE = "🔌"
ICON_SECRET = "🔏"
ICON_CONF = "📝"
ICON_STORE = "⚡"
ICON_INGRESS = "🌐"
ICON_RBAC = "🔑"
ICON_CRON = "🕒"
)
var (
EmptyDirs = []string{}
servicesMap = make(map[string]int)
locker = &sync.Mutex{}
dependScript = `
OK=0
echo "Checking __service__ port"
while [ $OK != 1 ]; do
echo -n "."
nc -z ` + helm.ReleaseNameTpl + `-__service__ __port__ 2>&1 >/dev/null && OK=1 || sleep 1
done
echo
echo "Done"
`
madeDeployments = make(map[string]helm.Deployment, 0)
)
// Create a Deployment for a given compose.Service. It returns a HelmFileGenerator,
// a channel of HelmFile used to generate the files (deployment, secrets, configMap...).
func CreateReplicaObject(name string, s types.ServiceConfig, linked map[string]types.ServiceConfig) HelmFileGenerator {
ret := make(chan HelmFile, runtime.NumCPU())
// there is a bug with types.ServiceConfig if we use the pointer, so we need to dereference it.
go buildDeployment(name, &s, linked, ret)
return ret
}
// Create a service (k8s).
func generateServicesAndIngresses(name string, s *types.ServiceConfig) []HelmFile {
ret := make([]HelmFile, 0) // can handle helm.Service or helm.Ingress
logger.Magenta(ICON_SERVICE+" Generating service for ", name)
ks := helm.NewService(name)
for _, p := range s.Ports {
target := int(p.Target)
ks.Spec.Ports = append(ks.Spec.Ports, helm.NewServicePort(target, target))
}
ks.Spec.Selector = buildSelector(name, s)
ret = append(ret, ks)
if v, ok := s.Labels[helm.LABEL_INGRESS]; ok {
port, err := strconv.Atoi(v)
if err != nil {
log.Fatalf("The given port \"%v\" as ingress port in \"%s\" service is not an integer\n", v, name)
}
logger.Cyanf(ICON_INGRESS+" Create an ingress for port %d on %s service\n", port, name)
ing := createIngress(name, port, s)
ret = append(ret, ing)
}
if len(s.Expose) > 0 {
logger.Magenta(ICON_SERVICE+" Generating service for ", name+"-external")
ks := helm.NewService(name + "-external")
ks.Spec.Type = "NodePort"
for _, expose := range s.Expose {
p, _ := strconv.Atoi(expose)
ks.Spec.Ports = append(ks.Spec.Ports, helm.NewServicePort(p, p))
}
ks.Spec.Selector = buildSelector(name, s)
ret = append(ret, ks)
}
return ret
}
// Create an ingress.
func createIngress(name string, port int, s *types.ServiceConfig) *helm.Ingress {
ingress := helm.NewIngress(name)
annotations := map[string]string{}
ingressVal := map[string]interface{}{
"class": "nginx",
"host": name + "." + helm.Appname + ".tld",
"enabled": false,
"annotations": annotations,
}
// add Annotations in values
AddValues(name, map[string]EnvVal{"ingress": ingressVal})
ingress.Spec.Rules = []helm.IngressRule{
{
Host: fmt.Sprintf("{{ .Values.%s.ingress.host }}", name),
Http: helm.IngressHttp{
Paths: []helm.IngressPath{{
Path: "/",
PathType: "Prefix",
Backend: &helm.IngressBackend{
Service: helm.IngressService{
Name: helm.ReleaseNameTpl + "-" + name,
Port: map[string]interface{}{
"number": port,
},
},
},
}},
},
},
}
ingress.SetIngressClass(name)
return ingress
}
// Build the selector for the service.
func buildSelector(name string, s *types.ServiceConfig) map[string]string {
return map[string]string{
"katenary.io/component": name,
"katenary.io/release": helm.ReleaseNameTpl,
}
}
// buildConfigMapFromPath generates a ConfigMap from a path.
func buildConfigMapFromPath(name, path string) *helm.ConfigMap {
stat, err := os.Stat(path)
if err != nil {
return nil
}
files := make(map[string]string, 0)
if stat.IsDir() {
found, _ := filepath.Glob(path + "/*")
for _, f := range found {
if s, err := os.Stat(f); err != nil || s.IsDir() {
if err != nil {
fmt.Fprintf(os.Stderr, "An error occured reading volume path %s\n", err.Error())
} else {
logger.ActivateColors = true
logger.Yellowf("Warning, %s is a directory, at this time we only "+
"can create configmap for first level file list\n", f)
logger.ActivateColors = false
}
continue
}
_, filename := filepath.Split(f)
c, _ := ioutil.ReadFile(f)
files[filename] = string(c)
}
} else {
c, _ := ioutil.ReadFile(path)
_, filename := filepath.Split(path)
files[filename] = string(c)
}
cm := helm.NewConfigMap(name, tools.GetRelPath(path))
cm.Data = files
return cm
}
// prepareProbes generates http/tcp/command probes for a service.
func prepareProbes(name string, s *types.ServiceConfig, container *helm.Container) {
// first, check if there is a label for the probe
if check, ok := s.Labels[helm.LABEL_HEALTHCHECK]; ok {
check = strings.TrimSpace(check)
p := helm.NewProbeFromService(s)
// get the port of the "url" check
if checkurl, err := url.Parse(check); err == nil {
if err == nil {
container.LivenessProbe = buildProtoProbe(p, checkurl)
}
} else {
// it's a command
container.LivenessProbe = p
container.LivenessProbe.Exec = &helm.Exec{
Command: []string{
"sh",
"-c",
check,
},
}
}
return // label overrides everything
}
// if not, we will use the default one
if s.HealthCheck != nil {
container.LivenessProbe = buildCommandProbe(s)
}
}
// buildProtoProbe builds a probe from a url that can be http or tcp.
func buildProtoProbe(probe *helm.Probe, u *url.URL) *helm.Probe {
port, err := strconv.Atoi(u.Port())
if err != nil {
port = 80
}
path := "/"
if u.Path != "" {
path = u.Path
}
switch u.Scheme {
case "http", "https":
probe.HttpGet = &helm.HttpGet{
Path: path,
Port: port,
}
case "tcp":
probe.TCP = &helm.TCP{
Port: port,
}
default:
logger.Redf("Error while parsing healthcheck url %s\n", u.String())
os.Exit(1)
}
return probe
}
func buildCommandProbe(s *types.ServiceConfig) *helm.Probe {
// Get the first element of the command from ServiceConfig
first := s.HealthCheck.Test[0]
p := helm.NewProbeFromService(s)
switch first {
case "CMD", "CMD-SHELL":
// CMD or CMD-SHELL
p.Exec = &helm.Exec{
Command: s.HealthCheck.Test[1:],
}
return p
default:
// badly made but it should work...
p.Exec = &helm.Exec{
Command: []string(s.HealthCheck.Test),
}
return p
}
}
func setSecretVar(name string, s *types.ServiceConfig, c *helm.Container) *helm.Secret {
// get the list of secret vars
secretvars, ok := s.Labels[helm.LABEL_SECRETVARS]
if !ok {
return nil
}
store := helm.NewSecret(name, "")
for _, secretvar := range strings.Split(secretvars, ",") {
secretvar = strings.TrimSpace(secretvar)
// get the value from env
_, ok := s.Environment[secretvar]
if !ok {
continue
}
// add the secret
store.AddEnv(secretvar, ".Values."+name+".environment."+secretvar)
AddEnvironment(name, secretvar, *s.Environment[secretvar])
// Finally remove the secret var from the environment on the service
// and the helm container definition.
defer func(secretvar string) { // defered because AddEnvironment locks the memory
locker.Lock()
defer locker.Unlock()
for i, env := range c.Env {
if env.Name == secretvar {
c.Env = append(c.Env[:i], c.Env[i+1:]...)
i--
}
}
delete(s.Environment, secretvar)
}(secretvar)
}
return store
}

View File

@@ -1,397 +0,0 @@
package generator
import (
"io/ioutil"
"katenary/compose"
"katenary/helm"
"katenary/logger"
"os"
"path/filepath"
"strings"
"testing"
"github.com/compose-spec/compose-go/cli"
)
const DOCKER_COMPOSE_YML = `version: '3'
services:
# first service, very simple
http:
image: nginx
ports:
- "80:80"
# second service, with environment variables
http2:
image: nginx
environment:
SOME_ENV_VAR: some_value
ANOTHER_ENV_VAR: another_value
# third service with ingress label
web:
image: nginx
ports:
- "80:80"
labels:
katenary.io/ingress: 80
web2:
image: nginx
command: ["/bin/sh", "-c", "while true; do echo hello; sleep 1; done"]
# fourth service is a php service depending on database
php:
image: php:7.2-apache
depends_on:
- database
environment:
SOME_ENV_VAR: some_value
ANOTHER_ENV_VAR: another_value
DB_HOST: database
labels:
katenary.io/mapenv: |
DB_HOST: {{ .Release.Name }}-database
database:
image: mysql:5.7
environment:
MYSQL_ROOT_PASSWORD: root
MYSQL_DATABASE: database
MYSQL_USER: user
MYSQL_PASSWORD: password
volumes:
- data:/var/lib/mysql
labels:
katenary.io/ports: 3306
# try to deploy 2 services but one is in the same pod than the other
http3:
image: nginx
http4:
image: nginx
labels:
katenary.io/same-pod: http3
# unmapped volumes
novol:
image: nginx
volumes:
- /tmp/data
labels:
katenary.io/ports: 80
# use = sign for environment variables
eqenv:
image: nginx
environment:
- SOME_ENV_VAR=some_value
- ANOTHER_ENV_VAR=another_value
# use environment file
useenvfile:
image: nginx
env_file:
- config/env
volumes:
data:
`
var defaultCliFiles = cli.DefaultFileNames
var TMP_DIR = ""
var TMPWORK_DIR = ""
func init() {
logger.NOLOG = len(os.Getenv("NOLOG")) < 1
}
func setUp(t *testing.T) (string, *compose.Parser) {
// cleanup "made" files
helm.ResetMadePVC()
cli.DefaultFileNames = defaultCliFiles
// create a temporary directory
tmp, err := os.MkdirTemp(os.TempDir(), "katenary-test-")
if err != nil {
t.Fatal(err)
}
tmpwork, err := os.MkdirTemp(os.TempDir(), "katenary-test-work-")
if err != nil {
t.Fatal(err)
}
composefile := filepath.Join(tmpwork, "docker-compose.yaml")
p := compose.NewParser([]string{composefile}, DOCKER_COMPOSE_YML)
// create envfile for "useenvfile" service
err = os.Mkdir(filepath.Join(tmpwork, "config"), 0777)
if err != nil {
t.Fatal(err)
}
envfile := filepath.Join(tmpwork, "config", "env")
fp, err := os.Create(envfile)
if err != nil {
t.Fatal("MKFILE", err)
}
fp.WriteString("FILEENV1=some_value\n")
fp.WriteString("FILEENV2=another_value\n")
fp.Close()
TMP_DIR = tmp
TMPWORK_DIR = tmpwork
p.Parse("testapp")
Generate(p, "test-0", "testapp", "1.2.3", "4.5.6", DOCKER_COMPOSE_YML, tmp)
return tmp, p
}
func tearDown() {
if len(TMP_DIR) > 0 {
os.RemoveAll(TMP_DIR)
}
if len(TMPWORK_DIR) > 0 {
os.RemoveAll(TMPWORK_DIR)
}
}
// Check if the web2 service has got a command.
func TestCommand(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
if name == "web2" {
// Ensure that the command is correctly set
// The command should be a string array
path := filepath.Join(tmp, "templates", name+".deployment.yaml")
path = filepath.Join(tmp, "templates", name+".deployment.yaml")
fp, _ := os.Open(path)
defer fp.Close()
lines, _ := ioutil.ReadAll(fp)
next := false
commands := make([]string, 0)
for _, line := range strings.Split(string(lines), "\n") {
if strings.Contains(line, "command") {
next = true
continue
}
if next {
commands = append(commands, line)
}
}
ok := 0
for _, command := range commands {
if strings.Contains(command, "- /bin/sh") {
ok++
}
if strings.Contains(command, "- -c") {
ok++
}
if strings.Contains(command, "while true; do") {
ok++
}
}
if ok != 3 {
t.Error("Command is not correctly set")
}
}
}
}
// Check if environment is correctly set.
func TestEnvs(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
if name == "php" {
// the "DB_HOST" environment variable inside the template must be set to '{{ .Release.Name }}-database'
path := filepath.Join(tmp, "templates", name+".deployment.yaml")
// read the file and find the DB_HOST variable
matched := false
fp, _ := os.Open(path)
defer fp.Close()
lines, _ := ioutil.ReadAll(fp)
next := false
for _, line := range strings.Split(string(lines), "\n") {
if !next && strings.Contains(line, "name: DB_HOST") {
next = true
continue
} else if next && strings.Contains(line, "value:") {
matched = true
if !strings.Contains(line, "{{ tpl .Values.php.environment.DB_HOST . }}") {
t.Error("DB_HOST variable should be set to {{ tpl .Values.php.environment.DB_HOST . }}", line, string(lines))
}
break
}
}
if !matched {
t.Error("DB_HOST variable not found in ", path)
t.Log(string(lines))
}
}
}
}
// Check if the same pod is not deployed twice.
func TestSamePod(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
path := filepath.Join(tmp, "templates", name+".deployment.yaml")
if _, found := service.Labels[helm.LABEL_SAMEPOD]; found {
// fail if the service has a deployment
if _, err := os.Stat(path); err == nil {
t.Error("Service ", name, " should not have a deployment")
}
continue
}
// others should have a deployment file
t.Log("Checking ", name, " deployment file")
_, err := os.Stat(path)
if err != nil {
t.Fatal(err)
}
}
}
// Check if the ports are correctly set.
func TestPorts(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
path := ""
// if the service has a port declared via helm.LABEL_PORT or ports, the service file should exist
hasPort := false
if _, found := service.Labels[helm.LABEL_PORT]; found {
hasPort = true
}
if service.Ports != nil {
hasPort = true
}
if hasPort {
path = filepath.Join(tmp, "templates", name+".service.yaml")
t.Log("Checking ", name, " service file")
_, err := os.Stat(path)
if err != nil {
t.Error(err)
}
}
}
}
// Check if the volumes are correctly set.
func TestPVC(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
path := filepath.Join(tmp, "templates", name+"-data.pvc.yaml")
// the "database" service should have a pvc file in templates (name-data.pvc.yaml)
if name == "database" {
path = filepath.Join(tmp, "templates", name+"-data.pvc.yaml")
t.Log("Checking ", name, " pvc file")
_, err := os.Stat(path)
if err != nil {
list, _ := filepath.Glob(tmp + "/templates/*")
t.Log(list)
t.Fatal(err)
}
}
}
}
// Check if the web service has an ingress.
func TestIngress(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
path := filepath.Join(tmp, "templates", name+".ingress.yaml")
// the "web" service should have a ingress file in templates (name.ingress.yaml)
if name == "web" {
path = filepath.Join(tmp, "templates", name+".ingress.yaml")
t.Log("Checking ", name, " ingress file")
_, err := os.Stat(path)
if err != nil {
t.Fatal(err)
}
}
}
}
// Check unmapped volumes
func TestUnmappedVolumes(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
for _, service := range p.Data.Services {
name := service.Name
if name == "novol" {
path := filepath.Join(tmp, "templates", name+".deployment.yaml")
fp, _ := os.Open(path)
defer fp.Close()
lines, _ := ioutil.ReadAll(fp)
for _, line := range strings.Split(string(lines), "\n") {
if strings.Contains(line, "novol-data") {
t.Error("novol service should not have a volume")
}
}
}
}
}
// Check if service using equal sign for environment works
func TestEqualSignOnEnv(t *testing.T) {
tmp, p := setUp(t)
defer tearDown()
// if the name is eqenv, the service should have environment variables
for _, service := range p.Data.Services {
name := service.Name
if name == "eqenv" {
path := filepath.Join(tmp, "templates", name+".deployment.yaml")
fp, _ := os.Open(path)
defer fp.Close()
lines, _ := ioutil.ReadAll(fp)
match := 0
for _, line := range strings.Split(string(lines), "\n") {
// we must find the line with the environment variable name
if strings.Contains(line, "SOME_ENV_VAR") {
// we must find the line with the environment variable value
match++
}
if strings.Contains(line, "ANOTHER_ENV_VAR") {
// we must find the line with the environment variable value
match++
}
}
if match != 4 { // because the value points on .Values...
t.Error("eqenv service should have 2 environment variables")
t.Log(string(lines))
}
}
}
}

139
generator/rbac.go Normal file
View File

@@ -0,0 +1,139 @@
package generator
import (
"katenary/utils"
"github.com/compose-spec/compose-go/types"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
var (
_ Yaml = (*RoleBinding)(nil)
_ Yaml = (*Role)(nil)
_ Yaml = (*ServiceAccount)(nil)
)
// RBAC is a kubernetes RBAC containing a role, a rolebinding and an associated serviceaccount.
type RBAC struct {
RoleBinding *RoleBinding
Role *Role
ServiceAccount *ServiceAccount
}
// NewRBAC creates a new RBAC from a compose service. The appName is the name of the application taken from the project name.
func NewRBAC(service types.ServiceConfig, appName string) *RBAC {
role := &Role{
Role: &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
Kind: "Role",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: []string{"*"},
Verbs: []string{"*"},
},
},
},
service: &service,
}
rolebinding := &RoleBinding{
RoleBinding: &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "RoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: utils.TplName(service.Name, appName),
Namespace: "{{ .Release.Namespace }}",
},
},
RoleRef: rbacv1.RoleRef{
Kind: "Role",
Name: utils.TplName(service.Name, appName),
APIGroup: "rbac.authorization.k8s.io",
},
},
service: &service,
}
serviceaccount := &ServiceAccount{
ServiceAccount: &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
Kind: "ServiceAccount",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
},
service: &service,
}
return &RBAC{
RoleBinding: rolebinding,
Role: role,
ServiceAccount: serviceaccount,
}
}
// RoleBinding is a kubernetes RoleBinding.
type RoleBinding struct {
*rbacv1.RoleBinding
service *types.ServiceConfig
}
func (r *RoleBinding) Yaml() ([]byte, error) {
return yaml.Marshal(r)
}
func (r *RoleBinding) Filename() string {
return r.service.Name + ".rolebinding.yaml"
}
// Role is a kubernetes Role.
type Role struct {
*rbacv1.Role
service *types.ServiceConfig
}
func (r *Role) Yaml() ([]byte, error) {
return yaml.Marshal(r)
}
func (r *Role) Filename() string {
return r.service.Name + ".role.yaml"
}
// ServiceAccount is a kubernetes ServiceAccount.
type ServiceAccount struct {
*corev1.ServiceAccount
service *types.ServiceConfig
}
func (r *ServiceAccount) Yaml() ([]byte, error) {
return yaml.Marshal(r)
}
func (r *ServiceAccount) Filename() string {
return r.service.Name + ".serviceaccount.yaml"
}

111
generator/secret.go Normal file
View File

@@ -0,0 +1,111 @@
package generator
import (
"encoding/base64"
"fmt"
"katenary/utils"
"strings"
"github.com/compose-spec/compose-go/types"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
var _ DataMap = (*Secret)(nil)
var _ Yaml = (*Secret)(nil)
// Secret is a kubernetes Secret.
//
// Implements the DataMap interface.
type Secret struct {
*corev1.Secret
service types.ServiceConfig `yaml:"-"`
}
// NewSecret creates a new Secret from a compose service
func NewSecret(service types.ServiceConfig, appName string) *Secret {
secret := &Secret{
service: service,
Secret: &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Data: make(map[string][]byte),
},
}
// check if the value should be in values.yaml
valueList := []string{}
varDescriptons := utils.GetValuesFromLabel(service, LABEL_VALUES)
for value := range varDescriptons {
valueList = append(valueList, value)
}
// wrap values with quotes
for _, value := range service.Environment {
if value == nil {
continue
}
*value = fmt.Sprintf(`"%s"`, *value)
}
for _, value := range valueList {
if val, ok := service.Environment[value]; ok {
value = strings.TrimPrefix(value, `"`)
*val = `.Values.` + service.Name + `.environment.` + value
}
}
for key, value := range service.Environment {
if value == nil {
continue
}
secret.AddData(key, *value)
}
return secret
}
// SetData sets the data of the secret.
func (s *Secret) SetData(data map[string]string) {
for key, value := range data {
s.AddData(key, fmt.Sprintf("%s", value))
}
}
// AddData adds a key value pair to the secret.
func (s *Secret) AddData(key string, value string) {
if value == "" {
return
}
s.Data[key] = []byte(`{{ tpl ` + value + ` $ | quote | b64enc }}`)
}
// Yaml returns the yaml representation of the secret.
func (s *Secret) Yaml() ([]byte, error) {
y, err := yaml.Marshal(s)
if err != nil {
return nil, err
}
// replace the b64 value by the real value
for _, value := range s.Data {
encoded := base64.StdEncoding.EncodeToString([]byte(value))
y = []byte(strings.ReplaceAll(string(y), encoded, string(value)))
}
return y, nil
}
// Filename returns the filename of the secret.
func (s *Secret) Filename() string {
return s.service.Name + ".secret.yaml"
}

95
generator/service.go Normal file
View File

@@ -0,0 +1,95 @@
package generator
import (
"katenary/utils"
"regexp"
"strings"
"github.com/compose-spec/compose-go/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/yaml"
)
var _ Yaml = (*Service)(nil)
// Service is a kubernetes Service.
type Service struct {
*v1.Service `yaml:",inline"`
service *types.ServiceConfig `yaml:"-"`
}
// NewService creates a new Service from a compose service.
func NewService(service types.ServiceConfig, appName string) *Service {
ports := []v1.ServicePort{}
s := &Service{
service: &service,
Service: &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName),
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Spec: v1.ServiceSpec{
Selector: GetMatchLabels(service.Name, appName),
Ports: ports,
},
},
}
for _, port := range service.Ports {
s.AddPort(port)
}
return s
}
// AddPort adds a port to the service.
func (s *Service) AddPort(port types.ServicePortConfig, serviceName ...string) {
name := s.service.Name
if len(serviceName) > 0 {
name = serviceName[0]
}
var finalport intstr.IntOrString
if targetPort := utils.GetServiceNameByPort(int(port.Target)); targetPort == "" {
finalport = intstr.FromInt(int(port.Target))
} else {
finalport = intstr.FromString(targetPort)
name = targetPort
}
s.Spec.Ports = append(s.Spec.Ports, v1.ServicePort{
Protocol: v1.ProtocolTCP,
Port: int32(port.Target),
TargetPort: finalport,
Name: name,
})
}
// Yaml returns the yaml representation of the service.
func (s *Service) Yaml() ([]byte, error) {
y, err := yaml.Marshal(s)
lines := []string{}
for _, line := range strings.Split(string(y), "\n") {
if regexp.MustCompile(`^\s*loadBalancer:\s*`).MatchString(line) {
continue
}
lines = append(lines, line)
}
y = []byte(strings.Join(lines, "\n"))
return y, err
}
// Filename returns the filename of the service.
func (s *Service) Filename() string {
return s.service.Name + ".service.yaml"
}

13
generator/types.go Normal file
View File

@@ -0,0 +1,13 @@
package generator
// DataMap is a kubernetes ConfigMap or Secret. It can be used to add data to the ConfigMap or Secret.
type DataMap interface {
SetData(map[string]string)
AddData(string, string)
}
// Yaml is a kubernetes object that can be converted to yaml.
type Yaml interface {
Yaml() ([]byte, error)
Filename() string
}

View File

@@ -1,77 +1,121 @@
package generator
import (
"katenary/helm"
"strings"
"github.com/compose-spec/compose-go/types"
)
var (
// Values is kept in memory to create a values.yaml file.
Values = make(map[string]map[string]interface{})
)
// Values is a map of all values for all services. Written to values.yaml.
// var Values = map[string]any{}
// AddValues adds values to the values.yaml map.
func AddValues(servicename string, values map[string]EnvVal) {
locker.Lock()
defer locker.Unlock()
if _, ok := Values[servicename]; !ok {
Values[servicename] = make(map[string]interface{})
// RepositoryValue is a docker repository image and tag that will be saved in values.yaml.
type RepositoryValue struct {
Image string `yaml:"image"`
Tag string `yaml:"tag"`
}
for k, v := range values {
Values[servicename][k] = v
// PersistenceValue is a persistence configuration that will be saved in values.yaml.
type PersistenceValue struct {
Enabled bool `yaml:"enabled"`
StorageClass string `yaml:"storageClass"`
Size string `yaml:"size"`
AccessMode []string `yaml:"accessMode"`
}
// IngressValue is an ingress configuration that will be saved in values.yaml.
type IngressValue struct {
Enabled bool `yaml:"enabled"`
Host string `yaml:"host"`
Path string `yaml:"path"`
Class string `yaml:"class"`
Annotations map[string]string `yaml:"annotations"`
}
// Value will be saved in values.yaml. It contains the configuration for all deployments and services.
// The content will look like:
//
// name_of_component:
// repository:
// image: image_name
// tag: image_tag
// persistence:
// enabled: true
// storageClass: storage_class_name
// ingress:
// enabled: true
// host: host_name
// path: path_name
// environment:
// ENV_VAR_1: value_1
// ENV_VAR_2: value_2
type Value struct {
Repository *RepositoryValue `yaml:"repository,omitempty"`
Persistence map[string]*PersistenceValue `yaml:"persistence,omitempty"`
Ingress *IngressValue `yaml:"ingress,omitempty"`
ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
Environment map[string]any `yaml:"environment,omitempty"`
Replicas *uint32 `yaml:"replicas,omitempty"`
CronJob *CronJobValue `yaml:"cronjob,omitempty"`
}
// CronJobValue is a cronjob configuration that will be saved in values.yaml.
type CronJobValue struct {
Repository *RepositoryValue `yaml:"repository,omitempty"`
Environment map[string]any `yaml:"environment,omitempty"`
ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
Schedule string `yaml:"schedule"`
}
// NewValue creates a new Value from a compose service.
// The value contains the necessary information to deploy the service (image, tag, replicas, etc.).
//
// If `main` is true, the tag will be empty because
// it will be set in the helm chart appVersion.
func NewValue(service types.ServiceConfig, main ...bool) *Value {
replicas := uint32(1)
v := &Value{
Replicas: &replicas,
}
// find the image tag
tag := ""
split := strings.Split(service.Image, ":")
v.Repository = &RepositoryValue{
Image: split[0],
}
// for the main service, the tag should be the appVersion, so here we set it to empty.
if len(main) > 0 && !main[0] {
if len(split) > 1 {
tag = split[1]
}
v.Repository.Tag = tag
} else {
v.Repository.Tag = ""
}
return v
}
// AddPersistence adds persistence configuration to the Value.
func (v *Value) AddPersistence(volumeName string) {
if v.Persistence == nil {
v.Persistence = make(map[string]*PersistenceValue, 0)
}
v.Persistence[volumeName] = &PersistenceValue{
Enabled: true,
StorageClass: "-",
Size: "1Gi",
AccessMode: []string{"ReadWriteOnce"},
}
}
func AddEnvironment(servicename string, key string, val EnvVal) {
locker.Lock()
defer locker.Unlock()
if _, ok := Values[servicename]; !ok {
Values[servicename] = make(map[string]interface{})
}
if _, ok := Values[servicename]["environment"]; !ok {
Values[servicename]["environment"] = make(map[string]EnvVal)
}
Values[servicename]["environment"].(map[string]EnvVal)[key] = val
}
// setEnvToValues will set the environment variables to the values.yaml map.
func setEnvToValues(name string, s *types.ServiceConfig, c *helm.Container) {
// create the "environment" key
env := make(map[string]EnvVal)
for k, v := range s.Environment {
env[k] = v
}
if len(env) == 0 {
return
}
for k, v := range env {
k = strings.ReplaceAll(k, ".", "_")
AddEnvironment(name, k, v)
}
//AddValues(name, map[string]EnvVal{"environment": valuesEnv})
for k := range env {
fixedK := strings.ReplaceAll(k, ".", "_")
v := "{{ tpl .Values." + name + ".environment." + fixedK + " . }}"
s.Environment[k] = &v
touched := false
for _, c := range c.Env {
if c.Name == k {
c.Value = v
touched = true
}
}
if !touched {
c.Env = append(c.Env, &helm.Value{Name: k, Value: v})
}
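// AddIngress adds an ingress configuration (host and path) to the Value.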
func (v *Value) AddIngress(host, path string) {
v.Ingress = &IngressValue{
Enabled: true,
Host: host,
Path: path,
Class: "-",
}
}

4
generator/version.go Normal file
View File

@@ -0,0 +1,4 @@
package generator
// Version is the version of katenary. It is set at compile time.
var Version = "master" // changed at compile time

119
generator/volume.go Normal file
View File

@@ -0,0 +1,119 @@
package generator
import (
"katenary/utils"
"strings"
"github.com/compose-spec/compose-go/types"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
var _ Yaml = (*VolumeClaim)(nil)
// VolumeClaim is a kubernetes VolumeClaim. This is a PersistentVolumeClaim.
type VolumeClaim struct {
*v1.PersistentVolumeClaim
service *types.ServiceConfig `yaml:"-"`
volumeName string
nameOverride string
}
// NewVolumeClaim creates a new VolumeClaim from a compose service.
func NewVolumeClaim(service types.ServiceConfig, volumeName, appName string) *VolumeClaim {
return &VolumeClaim{
volumeName: volumeName,
service: &service,
PersistentVolumeClaim: &v1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: utils.TplName(service.Name, appName) + "-" + volumeName,
Labels: GetLabels(service.Name, appName),
Annotations: Annotations,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
StorageClassName: utils.StrPtr(`{{ .Values.` + service.Name + `.persistence.` + volumeName + `.storageClass }}`),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
}
}
// Yaml marshals a VolumeClaim into yaml.
func (v *VolumeClaim) Yaml() ([]byte, error) {
serviceName := v.service.Name
if v.nameOverride != "" {
serviceName = v.nameOverride
}
volumeName := v.volumeName
out, err := yaml.Marshal(v)
if err != nil {
return nil, err
}
// replace 1Gi with {{ .Values.serviceName.volume.size }}
out = []byte(
strings.Replace(
string(out),
"1Gi",
utils.TplValue(serviceName, "persistence."+volumeName+".size"),
1,
),
)
out = []byte(
strings.Replace(
string(out),
"- ReadWriteOnce",
"{{- .Values."+
serviceName+
".persistence."+
volumeName+
".accessMode | toYaml | nindent __indent__ }}",
1,
),
)
lines := strings.Split(string(out), "\n")
for i, line := range lines {
if strings.Contains(line, "storageClass") {
lines[i] = utils.Wrap(
line,
"{{- if ne .Values."+serviceName+".persistence."+volumeName+".storageClass \"-\" }}",
"{{- end }}",
)
}
}
out = []byte(strings.Join(lines, "\n"))
// add condition
out = []byte(
"{{- if .Values." +
serviceName +
".persistence." +
volumeName +
".enabled }}\n" +
string(out) +
"\n{{- end }}",
)
return out, nil
}
// Filename returns the suggested filename for a VolumeClaim.
func (v *VolumeClaim) Filename() string {
return v.service.Name + "." + v.volumeName + ".volumeclaim.yaml"
}

View File

@@ -1,236 +0,0 @@
package generator
import (
"katenary/helm"
"katenary/logger"
"katenary/tools"
"os"
"path/filepath"
"strings"
"github.com/compose-spec/compose-go/types"
"gopkg.in/yaml.v3"
)
var (
// VolumeValues is the map of volumes for each deployment
// containing volume configuration
VolumeValues = make(map[string]map[string]map[string]EnvVal)
)
// AddVolumeValues add a volume to the values.yaml map for the given deployment name.
func AddVolumeValues(deployment string, volname string, values map[string]EnvVal) {
locker.Lock()
defer locker.Unlock()
if _, ok := VolumeValues[deployment]; !ok {
VolumeValues[deployment] = make(map[string]map[string]EnvVal)
}
VolumeValues[deployment][volname] = values
}
// addVolumeFrom takes the LABEL_VOLUMEFROM label to get volumes from another container. This can only work with
// containers that have LABEL_SAMEPOD, as we need to get the volumes from another container in the same deployment.
func addVolumeFrom(deployment *helm.Deployment, container *helm.Container, s *types.ServiceConfig) {
labelfrom, ok := s.Labels[helm.LABEL_VOLUMEFROM]
if !ok {
return
}
// decode Yaml from the label
var volumesFrom map[string]map[string]string
err := yaml.Unmarshal([]byte(labelfrom), &volumesFrom)
if err != nil {
logger.ActivateColors = true
logger.Red(err.Error())
logger.ActivateColors = false
return
}
// for each declared volume "from", we will find it in the deployment volumes and add it to the container.
// Then, to avoid duplicates, we will remove it from the ServiceConfig object.
for name, volumes := range volumesFrom {
for volumeName := range volumes {
initianame := volumeName
volumeName = tools.PathToName(volumeName)
// get the volume from the deployment container "name"
var ctn *helm.Container
for _, c := range deployment.Spec.Template.Spec.Containers {
if c.Name == name {
ctn = c
break
}
}
if ctn == nil {
logger.ActivateColors = true
logger.Redf("VolumeFrom: container %s not found", name)
logger.ActivateColors = false
continue
}
// get the volume from the container
for _, v := range ctn.VolumeMounts {
switch v := v.(type) {
case map[string]interface{}:
if v["name"] == volumeName {
if container.VolumeMounts == nil {
container.VolumeMounts = make([]interface{}, 0)
}
// make a copy of the volume mount and then add it to the VolumeMounts
var mountpoint = make(map[string]interface{})
for k, v := range v {
mountpoint[k] = v
}
container.VolumeMounts = append(container.VolumeMounts, mountpoint)
// remove the volume from the ServiceConfig
for i, vol := range s.Volumes {
if vol.Source == initianame {
s.Volumes = append(s.Volumes[:i], s.Volumes[i+1:]...)
i--
break
}
}
}
}
}
}
}
}
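The label value is plain YAML mapping a sibling container name to its volume names (only the keys of the inner map matter); a minimal sketch of the shape addVolumeFrom decodes, with invented service and volume names:

	// Hypothetical value of the helm.LABEL_VOLUMEFROM label: reuse the "data"
	// volume already mounted by the "database" container of the same pod.
	labelValue := `database:
	  data: ""
	`
	var volumesFrom map[string]map[string]string
	if err := yaml.Unmarshal([]byte(labelValue), &volumesFrom); err != nil {
		logger.Red(err.Error()) // same error path as in addVolumeFrom
	} else {
		_ = volumesFrom["database"] // keys of this inner map are the volumes to copy
	}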
// prepareVolumes adds the volumes of a service.
func prepareVolumes(
deployment, name string,
s *types.ServiceConfig,
container *helm.Container,
fileGeneratorChan HelmFileGenerator) []map[string]interface{} {
volumes := make([]map[string]interface{}, 0)
mountPoints := make([]interface{}, 0)
configMapsVolumes := make([]string, 0)
if v, ok := s.Labels[helm.LABEL_VOL_CM]; ok {
configMapsVolumes = strings.Split(v, ",")
for i, cm := range configMapsVolumes {
configMapsVolumes[i] = strings.TrimSpace(cm)
}
}
for _, vol := range s.Volumes {
volname := vol.Source
volepath := vol.Target
if volname == "" {
logger.ActivateColors = true
logger.Yellowf("Warning, volume source to %s is empty for %s -- skipping\n", volepath, name)
logger.ActivateColors = false
continue
}
isConfigMap := false
for _, cmVol := range configMapsVolumes {
if tools.GetRelPath(volname) == cmVol {
isConfigMap = true
break
}
}
// local volumes cannot be mounted
if !isConfigMap && (strings.HasPrefix(volname, ".") || strings.HasPrefix(volname, "/")) {
logger.ActivateColors = true
logger.Redf("You cannot, at this time, have local volume in %s deployment\n", name)
logger.ActivateColors = false
continue
}
if isConfigMap {
// check if the volname path points to a file; if so, we need to mount it with a subPath
stat, err := os.Stat(volname)
if err != nil {
logger.ActivateColors = true
logger.Redf("An error occured reading volume path %s\n", err.Error())
logger.ActivateColors = false
continue
}
pointToFile := ""
if !stat.IsDir() {
pointToFile = filepath.Base(volname)
}
// the volume is a path and it is explicitly asked to be a configmap in labels
cm := buildConfigMapFromPath(name, volname)
cm.K8sBase.Metadata.Name = helm.ReleaseNameTpl + "-" + name + "-" + tools.PathToName(volname)
// build a configmapRef for this volume
volname := tools.PathToName(volname)
volumes = append(volumes, map[string]interface{}{
"name": volname,
"configMap": map[string]string{
"name": cm.K8sBase.Metadata.Name,
},
})
if len(pointToFile) > 0 {
mountPoints = append(mountPoints, map[string]interface{}{
"name": volname,
"mountPath": volepath,
"subPath": pointToFile,
})
} else {
mountPoints = append(mountPoints, map[string]interface{}{
"name": volname,
"mountPath": volepath,
})
}
if cm != nil {
fileGeneratorChan <- cm
}
} else {
// It's a volume: mount it from a PVC that we declare below.
volname = strings.ReplaceAll(volname, "-", "")
isEmptyDir := false
for _, v := range EmptyDirs {
v = strings.ReplaceAll(v, "-", "")
if v == volname {
volumes = append(volumes, map[string]interface{}{
"name": volname,
"emptyDir": map[string]string{},
})
mountPoints = append(mountPoints, map[string]interface{}{
"name": volname,
"mountPath": volepath,
})
container.VolumeMounts = append(container.VolumeMounts, mountPoints...)
isEmptyDir = true
break
}
}
if isEmptyDir {
continue
}
volumes = append(volumes, map[string]interface{}{
"name": volname,
"persistentVolumeClaim": map[string]string{
"claimName": helm.ReleaseNameTpl + "-" + volname,
},
})
mountPoints = append(mountPoints, map[string]interface{}{
"name": volname,
"mountPath": volepath,
})
logger.Yellow(ICON_STORE+" Generate volume values", volname, "for container named", name, "in deployment", deployment)
AddVolumeValues(deployment, volname, map[string]EnvVal{
"enabled": false,
"capacity": "1Gi",
})
if pvc := helm.NewPVC(deployment, volname); pvc != nil {
fileGeneratorChan <- pvc
}
}
}
// add the volume mounts to the container and return the volume definitions to add to the Deployment
container.VolumeMounts = append(container.VolumeMounts, mountPoints...)
return volumes
}

View File

@@ -1,236 +0,0 @@
package generator
import (
"katenary/compose"
"katenary/generator/writers"
"katenary/helm"
"katenary/tools"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/compose-spec/compose-go/types"
"gopkg.in/yaml.v3"
)
// HelmFile represents a helm file from the helm package that provides the necessary methods
// to generate a helm file.
type HelmFile interface {
GetType() string
GetPathRessource() string
}
// HelmFileGenerator is a channel of HelmFile.
type HelmFileGenerator chan HelmFile
var PrefixRE = regexp.MustCompile(`\{\{.*\}\}-?`)
func portExists(port int, ports []types.ServicePortConfig) bool {
for _, p := range ports {
if p.Target == uint32(port) {
log.Println("portExists:", port, p.Target)
return true
}
}
return false
}
// Generate takes a parsed compose file and generates the helm files.
func Generate(p *compose.Parser, katernayVersion, appName, appVersion, chartVersion, composeFile, dirName string) {
// make the appname global (yes... ugly but easy)
helm.Appname = appName
helm.Version = katernayVersion
templatesDir := filepath.Join(dirName, "templates")
// try to create the directory
err := os.MkdirAll(templatesDir, 0755)
if err != nil {
log.Fatal(err)
}
generators := make(map[string]HelmFileGenerator)
// remove skipped services from the parsed data
for i, service := range p.Data.Services {
if v, ok := service.Labels[helm.LABEL_IGNORE]; !ok || v != "true" {
continue
}
p.Data.Services = append(p.Data.Services[:i], p.Data.Services[i+1:]...)
i--
// find this service in the other services' "depends_on" and remove it
for _, service2 := range p.Data.Services {
delete(service2.DependsOn, service.Name)
}
}
for i, service := range p.Data.Services {
n := service.Name
// if the service port is declared in labels, add it to the service.
if ports, ok := service.Labels[helm.LABEL_PORT]; ok {
if service.Ports == nil {
service.Ports = make([]types.ServicePortConfig, 0)
}
for _, port := range strings.Split(ports, ",") {
port = strings.TrimSpace(port)
target, err := strconv.Atoi(port)
if err != nil {
log.Fatal(err)
}
if portExists(target, service.Ports) {
continue
}
service.Ports = append(service.Ports, types.ServicePortConfig{
Target: uint32(target),
})
}
}
// find port and store it in servicesMap
for _, port := range service.Ports {
target := int(port.Target)
if target != 0 {
servicesMap[n] = target
break
}
}
// manage emptyDir volumes
if empty, ok := service.Labels[helm.LABEL_EMPTYDIRS]; ok {
// split the emptyDirs list by comma
emptyDirs := strings.Split(empty, ",")
for i, emptyDir := range emptyDirs {
emptyDirs[i] = strings.TrimSpace(emptyDir)
}
// append them to EmptyDirs
EmptyDirs = append(EmptyDirs, emptyDirs...)
}
p.Data.Services[i] = service
}
// for all services in the linked map that are not in the samePods map, generate the service
for _, s := range p.Data.Services {
name := s.Name
// do not make a deployment for services declared to be in the same pod as another
if _, ok := s.Labels[helm.LABEL_SAMEPOD]; ok {
continue
}
// find services that are in the same pod
linked := make(map[string]types.ServiceConfig, 0)
for _, service := range p.Data.Services {
n := service.Name
if linkname, ok := service.Labels[helm.LABEL_SAMEPOD]; ok && linkname == name {
linked[n] = service
delete(s.DependsOn, n)
}
}
generators[name] = CreateReplicaObject(name, s, linked)
}
// to generate the notes, we need to keep a list of Ingresses
ingresses := make(map[string]*helm.Ingress)
for n, generator := range generators { // generators is a map : name -> generator
for helmFile := range generator { // generator is a chan
if helmFile == nil { // generator finished
break
}
kind := helmFile.(helm.Kinded).Get()
kind = strings.ToLower(kind)
// Add a SHA inside the generated file; it is only there
// to make it easy to check that the compose file corresponds to the
// generated helm chart
helmFile.(helm.Signable).BuildSHA(composeFile)
// Some types need special fixes in yaml generation
switch c := helmFile.(type) {
case *helm.Storage:
// For storage, we need to add a "condition" to activate it
writers.BuildStorage(c, n, templatesDir)
case *helm.Deployment:
// for the deployment, we need to fix persistence volumes
// so that they are activated only when the storage is "enabled",
// otherwise we use an "emptyDir"
writers.BuildDeployment(c, n, templatesDir)
case *helm.Service:
// Change the type for service if it's an "exposed" port
writers.BuildService(c, n, templatesDir)
case *helm.Ingress:
// we need to make ingresses activatable from the values
ingresses[n] = c // keep it to generate notes
writers.BuildIngress(c, n, templatesDir)
case *helm.ConfigMap, *helm.Secret:
// there could be several files, so let's force the filename
name := c.(helm.Named).Name() + "." + c.GetType()
suffix := c.GetPathRessource()
suffix = tools.PathToName(suffix)
name += suffix
name = PrefixRE.ReplaceAllString(name, "")
writers.BuildConfigMap(c, kind, n, name, templatesDir)
default:
name := c.(helm.Named).Name() + "." + c.GetType()
name = PrefixRE.ReplaceAllString(name, "")
fname := filepath.Join(templatesDir, name+".yaml")
fp, err := os.Create(fname)
if err != nil {
log.Fatal(err)
}
defer fp.Close()
enc := yaml.NewEncoder(fp)
enc.SetIndent(writers.IndentSize)
enc.Encode(c)
}
}
}
// Create the values.yaml file
valueFile, err := os.Create(filepath.Join(dirName, "values.yaml"))
if err != nil {
log.Fatal(err)
}
defer valueFile.Close()
enc := yaml.NewEncoder(valueFile)
enc.SetIndent(writers.IndentSize)
enc.Encode(Values)
// Create the Chart.yaml file
chartFile, err := os.Create(filepath.Join(dirName, "Chart.yaml"))
if err != nil {
log.Fatal(err)
}
defer chartFile.Close()
chartFile.WriteString(`# Created on ` + time.Now().Format(time.RFC3339) + "\n")
chartFile.WriteString(`# Katenary command line: ` + strings.Join(os.Args, " ") + "\n")
enc = yaml.NewEncoder(chartFile)
enc.SetIndent(writers.IndentSize)
enc.Encode(map[string]interface{}{
"apiVersion": "v2",
"name": appName,
"description": "A helm chart for " + appName,
"type": "application",
"version": chartVersion,
"appVersion": appVersion,
})
// And finally, create a NOTES.txt file
noteFile, err := os.Create(filepath.Join(templatesDir, "NOTES.txt"))
if err != nil {
log.Fatal(err)
}
defer noteFile.Close()
noteFile.WriteString(helm.GenerateNotesFile(ingresses))
}
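For reference, a sketch of the kind of service the loops above consumed, with ports and emptyDir volumes declared through labels (the concrete names and values are invented; only the helm.LABEL_* constants and the parsing described above come from this file):

	// Hypothetical compose service: two exposed ports and one emptyDir volume,
	// both declared as comma-separated label values and parsed as shown above.
	svc := types.ServiceConfig{
		Name: "web",
		Labels: types.Labels{
			helm.LABEL_PORT:      "8080, 9090", // trimmed, then converted with strconv.Atoi
			helm.LABEL_EMPTYDIRS: "cache",      // appended to EmptyDirs after trimming
		},
	}
	_ = svc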

View File

@@ -1,18 +0,0 @@
package writers
import (
"os"
"path/filepath"
"gopkg.in/yaml.v3"
)
// BuildConfigMap writes the configMap.
func BuildConfigMap(c interface{}, kind, servicename, name, templatesDir string) {
fname := filepath.Join(templatesDir, name+"."+kind+".yaml")
fp, _ := os.Create(fname)
enc := yaml.NewEncoder(fp)
enc.SetIndent(IndentSize)
enc.Encode(c)
fp.Close()
}

View File

@@ -1,44 +0,0 @@
package writers
import (
"bytes"
"katenary/helm"
"os"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
)
// BuildDeployment builds a deployment.
func BuildDeployment(deployment *helm.Deployment, name, templatesDir string) {
kind := "deployment"
fname := filepath.Join(templatesDir, name+"."+kind+".yaml")
fp, _ := os.Create(fname)
buffer := bytes.NewBuffer(nil)
enc := yaml.NewEncoder(buffer)
enc.SetIndent(IndentSize)
enc.Encode(deployment)
content := strings.Split(buffer.String(), "\n")
dataname := ""
component := deployment.Spec.Selector["matchLabels"].(map[string]string)[helm.K+"/component"]
n := 0 // indentation of the "persistentVolumeClaim" line, used to indent "else" and "end" at the right place
for _, line := range content {
if strings.Contains(line, "name:") {
dataname = strings.Split(line, ":")[1]
dataname = strings.TrimSpace(dataname)
} else if strings.Contains(line, "persistentVolumeClaim") {
n = CountSpaces(line)
line = strings.Repeat(" ", n) + "{{- if .Values." + component + ".persistence." + dataname + ".enabled }}\n" + line
} else if strings.Contains(line, "claimName") {
spaces := strings.Repeat(" ", n)
line += "\n" + spaces + "{{ else }}"
line += "\n" + spaces + "emptyDir: {}"
line += "\n" + spaces + "{{- end }}"
}
fp.WriteString(line + "\n")
}
fp.Close()
}

View File

@@ -1,101 +0,0 @@
package writers
import (
"bytes"
"katenary/helm"
"os"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
)
const (
classAndVersionCondition = `{{- if and .Values.__name__.ingress.class (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}` + "\n"
versionCondition118 = `{{- if semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion }}` + "\n"
versionCondition119 = `{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}` + "\n"
apiVersion = `{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}`
)
// BuildIngress generates the ingress yaml file with conditions.
func BuildIngress(ingress *helm.Ingress, name, templatesDir string) {
// Set the backend for 1.18
for _, b := range ingress.Spec.Rules {
for _, p := range b.Http.Paths {
p.Backend.ServiceName = p.Backend.Service.Name
if n, ok := p.Backend.Service.Port["number"]; ok {
p.Backend.ServicePort = n
}
}
}
kind := "ingress"
buffer := bytes.NewBuffer(nil)
fname := filepath.Join(templatesDir, name+"."+kind+".yaml")
enc := yaml.NewEncoder(buffer)
enc.SetIndent(IndentSize)
buffer.WriteString("{{- if .Values." + name + ".ingress.enabled -}}\n")
enc.Encode(ingress)
buffer.WriteString("{{- end -}}")
fp, err := os.Create(fname)
if err != nil {
panic(err)
}
defer fp.Close()
content := buffer.String()
lines := strings.Split(content, "\n")
backendHit := false
for _, l := range lines {
// apiVersion is a pain...
if strings.Contains(l, "apiVersion:") {
l = apiVersion
}
// add annotations linked to the Values
if strings.Contains(l, "annotations:") {
n := CountSpaces(l) + IndentSize
l += "\n" + strings.Repeat(" ", n) + "{{- range $k, $v := .Values.__name__.ingress.annotations }}\n"
l += strings.Repeat(" ", n) + "{{ $k }}: {{ $v }}\n"
l += strings.Repeat(" ", n) + "{{- end }}"
l = strings.ReplaceAll(l, "__name__", name)
}
// pathType is only for 1.19+
if strings.Contains(l, "pathType:") {
n := CountSpaces(l)
l = strings.Repeat(" ", n) + versionCondition118 +
l + "\n" +
strings.Repeat(" ", n) + "{{- end }}"
}
if strings.Contains(l, "ingressClassName") {
// should be set only if the version of Kubernetes is 1.18-0 or higher
cond := strings.ReplaceAll(classAndVersionCondition, "__name__", name)
l = ` ` + cond + l + "\n" + ` {{- end }}`
}
// manage the backend format depending on whether the Kubernetes version is 1.19-0 or higher
if strings.Contains(l, "service:") {
n := CountSpaces(l)
l = strings.Repeat(" ", n) + versionCondition119 + l
}
if strings.Contains(l, "serviceName:") || strings.Contains(l, "servicePort:") {
n := CountSpaces(l)
if !backendHit {
l = strings.Repeat(" ", n) + "{{- else }}\n" + l
} else {
l = l + "\n" + strings.Repeat(" ", n) + "{{- end }}\n"
}
backendHit = true
}
fp.WriteString(l + "\n")
}
}

View File

@@ -1,24 +0,0 @@
package writers
import (
"katenary/helm"
"os"
"path/filepath"
"gopkg.in/yaml.v3"
)
// BuildService writes the service (external or not).
func BuildService(service *helm.Service, name, templatesDir string) {
kind := "service"
suffix := ""
if service.Spec.Type == "NodePort" {
suffix = "-external"
}
fname := filepath.Join(templatesDir, name+suffix+"."+kind+".yaml")
fp, _ := os.Create(fname)
enc := yaml.NewEncoder(fp)
enc.SetIndent(IndentSize)
enc.Encode(service)
fp.Close()
}

View File

@@ -1,32 +0,0 @@
package writers
import (
"katenary/helm"
"log"
"os"
"path/filepath"
"gopkg.in/yaml.v3"
)
// BuildStorage writes the persistentVolumeClaim.
func BuildStorage(storage *helm.Storage, name, templatesDir string) {
kind := "pvc"
name = storage.Metadata.Labels[helm.K+"/component"]
pvcname := storage.Metadata.Labels[helm.K+"/pvc-name"]
fname := filepath.Join(templatesDir, name+"-"+pvcname+"."+kind+".yaml")
fp, err := os.Create(fname)
if err != nil {
log.Fatal(err)
}
defer fp.Close()
volname := storage.K8sBase.Metadata.Labels[helm.K+"/pvc-name"]
fp.WriteString("{{ if .Values." + name + ".persistence." + volname + ".enabled }}\n")
enc := yaml.NewEncoder(fp)
enc.SetIndent(IndentSize)
if err := enc.Encode(storage); err != nil {
log.Fatal(err)
}
fp.WriteString("{{- end -}}")
}

Some files were not shown because too many files have changed in this diff.