Merge pull request #3723 from Budibase/infra/consolidate-k8s-config

Infra/consolidate k8s config
Martin McKeaveney 2021-12-08 17:36:13 +00:00 committed by GitHub
commit 42c234cb91
38 changed files with 1102 additions and 1000 deletions

View File

@@ -43,15 +43,20 @@ jobs:
           DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
           BUDIBASE_RELEASE_VERSION: ${{ steps.previoustag.outputs.tag }}
+      - name: Configure Git
+        run: |
+          git config user.name "Budibase Production Bot"
+          git config user.email "<>"
       - uses: azure/setup-helm@v1
         id: install
+      # So, we need to inject the values into this
       - run: yarn release:helm
       - name: Run chart-releaser
         uses: helm/chart-releaser-action@v1.1.0
         with:
           charts_dir: docs
+          branch: helm-repo
         env:
           CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

View File

@@ -56,3 +56,5 @@ jobs:
           DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
           DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
           BUDIBASE_RELEASE_VERSION: ${{ steps.previoustag.outputs.tag }}
+      # Release to pre-prod environment

View File

@@ -1,9 +1,35 @@
 apiVersion: v1
 entries:
   budibase:
+  - apiVersion: v2
+    appVersion: 1.0.6
+    created: "2021-12-08T16:26:47.061065Z"
+    dependencies:
+    - condition: services.couchdb.enabled
+      name: couchdb
+      repository: https://apache.github.io/couchdb-helm
+      version: 3.3.4
+    - condition: ingress.nginx
+      name: ingress-nginx
+      repository: https://github.com/kubernetes/ingress-nginx
+      version: 3.35.0
+    description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes.
+    digest: 9e3d5b600368a4fd65ba827986c943b65d7ffae6544b3fda0418e760866e8929
+    keywords:
+    - low-code
+    - database
+    - cluster
+    name: budibase
+    sources:
+    - https://github.com/Budibase/budibase
+    - https://budibase.com
+    type: application
+    urls:
+    - https://budibase.github.io/budibase/budibase-0.2.4.tgz
+    version: 0.2.4
   - apiVersion: v2
     appVersion: 0.9.169
-    created: "2021-10-20T14:27:23.521358+01:00"
+    created: "2021-12-08T16:26:47.055284Z"
     dependencies:
     - condition: services.couchdb.enabled
       name: couchdb
@@ -29,7 +55,7 @@ entries:
     version: 0.2.2
   - apiVersion: v2
     appVersion: 0.9.163
-    created: "2021-10-20T14:27:23.5153+01:00"
+    created: "2021-12-08T16:26:47.051008Z"
     dependencies:
     - condition: services.couchdb.enabled
       name: couchdb
@@ -55,7 +81,7 @@ entries:
     version: 0.2.1
   - apiVersion: v2
     appVersion: 0.9.163
-    created: "2021-10-20T14:27:23.510041+01:00"
+    created: "2021-12-08T16:26:47.046825Z"
     dependencies:
     - condition: services.couchdb.enabled
       name: couchdb
@@ -81,7 +107,7 @@ entries:
     version: 0.2.0
   - apiVersion: v2
     appVersion: 0.9.56
-    created: "2021-10-20T14:27:23.504543+01:00"
+    created: "2021-12-08T16:26:47.042113Z"
     dependencies:
     - condition: services.couchdb.enabled
       name: couchdb
@@ -106,7 +132,7 @@ entries:
     version: 0.1.1
   - apiVersion: v2
     appVersion: 0.9.56
-    created: "2021-10-20T14:27:23.496847+01:00"
+    created: "2021-12-08T16:26:47.036016Z"
     dependencies:
     - condition: services.couchdb.enabled
       name: couchdb
@@ -129,4 +155,4 @@ entries:
     urls:
     - https://budibase.github.io/budibase/budibase-0.1.0.tgz
     version: 0.1.0
-generated: "2021-10-20T14:27:23.491132+01:00"
+generated: "2021-12-08T16:26:47.031998Z"

View File

@@ -1 +0,0 @@
hosting.properties

View File

@@ -0,0 +1,19 @@
# Budibase DigitalOcean One Click
This directory contains the configuration for packaging and creating a snapshot for the Budibase 1-Click DigitalOcean build. We use this configuration to produce an immutable, reproducible build package for DigitalOcean that rarely needs to be updated.
## Prerequisites
You must install HashiCorp's `packer` to build the snapshot for DigitalOcean. Follow the instructions to install packer [here](https://learn.hashicorp.com/tutorials/packer/get-started-install-cli).
You must have the `DIGITALOCEAN_TOKEN` environment variable set so that packer can reach out to the DigitalOcean API for build information.
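For example (a sketch — the token value is a placeholder, not a real credential):
```bash
# Generate a personal access token in the DigitalOcean control panel first
export DIGITALOCEAN_TOKEN="<your-do-api-token>"
```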
## Building
Just run the following command:
```
yarn build:digitalocean
```
## Uploading to Marketplace
You can upload the snapshot to the DigitalOcean vendor portal at the following link (requires a vendor account):
https://marketplace.digitalocean.com/vendorportal

hosting/digitalocean/build.sh Executable file
View File

@@ -0,0 +1,2 @@
#!/bin/bash
packer build template.json

View File

@@ -0,0 +1,19 @@
#!/bin/sh
#
# Configured as part of the DigitalOcean 1-Click Image build process
myip=$(hostname -I | awk '{print$1}')
cat <<EOF
********************************************************************************
Welcome to the Budibase DigitalOcean 1-Click Droplet.
To keep this Droplet secure, the UFW firewall is enabled.
All ports are BLOCKED except 22 (SSH), 80 (HTTP), 443 (HTTPS), and 10000.
* Budibase website: http://budibase.com
For help and more information, visit https://docs.budibase.com/self-hosting/hosting-methods/digitalocean
********************************************************************************
To delete this message of the day: rm -rf $(readlink -f ${0})
EOF

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# go into the app dir
cd /root
# fetch envoy and docker-compose files
wget https://raw.githubusercontent.com/Budibase/budibase/master/hosting/docker-compose.yaml
wget https://raw.githubusercontent.com/Budibase/budibase/master/hosting/envoy.yaml
wget https://raw.githubusercontent.com/Budibase/budibase/master/hosting/hosting.properties
# Create .env file from hosting.properties using bash and then remove it
while read -r line; do
  uuid=$(uuidgen)
  echo "$line" | sed "s/budibase/$uuid/g" | sed "s/testsecret/$uuid/g" >> .env
done <hosting.properties
rm hosting.properties
# boot the stack
docker-compose up -d
# return
cd -
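Illustration only (`MINIO_ACCESS_KEY=budibase` is a hypothetical `hosting.properties` entry, not necessarily a real one): each default value is swapped for a random UUID before the stack boots.
```bash
# Hypothetical example of what one loop iteration does
uuid=$(uuidgen)
echo "MINIO_ACCESS_KEY=budibase" | sed "s/budibase/$uuid/g"
# -> MINIO_ACCESS_KEY=<random-uuid>
```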

View File

@@ -0,0 +1,49 @@
#!/bin/bash
# DigitalOcean Marketplace Image Validation Tool
# © 2021 DigitalOcean LLC.
# This code is licensed under Apache 2.0 license (see LICENSE.md for details)
set -o errexit
# Ensure /tmp exists and has the proper permissions before
# checking for security updates
# https://github.com/digitalocean/marketplace-partners/issues/94
if [[ ! -d /tmp ]]; then
mkdir /tmp
fi
chmod 1777 /tmp
if [ -n "$(command -v yum)" ]; then
yum update -y
yum clean all
elif [ -n "$(command -v apt-get)" ]; then
export DEBIAN_FRONTEND=noninteractive
apt-get -y update
apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --force-yes
apt-get -y autoremove
apt-get -y autoclean
fi
rm -rf /tmp/* /var/tmp/*
history -c
cat /dev/null > /root/.bash_history
unset HISTFILE
find /var/log -mtime -1 -type f -exec truncate -s 0 {} \;
rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-????????
rm -rf /var/lib/cloud/instances/*
rm -f /root/.ssh/authorized_keys /etc/ssh/*key*
touch /etc/ssh/revoked_keys
chmod 600 /etc/ssh/revoked_keys
# Securely erase the unused portion of the filesystem
GREEN='\033[0;32m'
NC='\033[0m'
printf "\n${GREEN}Writing zeros to the remaining disk space to securely
erase the unused portion of the file system.
Depending on your disk size this may take several minutes.
The secure erase will complete successfully when you see:${NC}
dd: writing to '/zerofile': No space left on device\n
Beginning secure erase now\n"
dd if=/dev/zero of=/zerofile bs=4096 || rm /zerofile

View File

@@ -0,0 +1,617 @@
#!/bin/bash
# DigitalOcean Marketplace Image Validation Tool
# © 2021 DigitalOcean LLC.
# This code is licensed under Apache 2.0 license (see LICENSE.md for details)
VERSION="v. 1.6"
RUNDATE=$( date )
# Script should be run with SUDO
if [ "$EUID" -ne 0 ]
then echo "[Error] - This script must be run with sudo or as the root user."
exit 1
fi
STATUS=0
PASS=0
WARN=0
FAIL=0
# $1 == command to check for
# returns: 0 == true, 1 == false
cmdExists() {
if command -v "$1" > /dev/null 2>&1; then
return 0
else
return 1
fi
}
function getDistro {
if [ -f /etc/os-release ]; then
# freedesktop.org and systemd
. /etc/os-release
OS=$NAME
VER=$VERSION_ID
elif type lsb_release >/dev/null 2>&1; then
# linuxbase.org
OS=$(lsb_release -si)
VER=$(lsb_release -sr)
elif [ -f /etc/lsb-release ]; then
# For some versions of Debian/Ubuntu without lsb_release command
. /etc/lsb-release
OS=$DISTRIB_ID
VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
# Older Debian/Ubuntu/etc.
OS=Debian
VER=$(cat /etc/debian_version)
elif [ -f /etc/SuSe-release ]; then
# Older SuSE/etc.
:
elif [ -f /etc/redhat-release ]; then
# Older Red Hat, CentOS, etc.
VER=$( cat /etc/redhat-release | cut -d" " -f3 | cut -d "." -f1)
d=$( cat /etc/redhat-release | cut -d" " -f1 | cut -d "." -f1)
if [[ $d == "CentOS" ]]; then
OS="CentOS Linux"
fi
else
# Fall back to uname, e.g. "Linux <version>", also works for BSD, etc.
OS=$(uname -s)
VER=$(uname -r)
fi
}
function loadPasswords {
SHADOW=$(cat /etc/shadow)
}
function checkAgent {
# Check for the presence of the do-agent in the filesystem
if [ -d /var/opt/digitalocean/do-agent ];then
echo -en "\e[41m[FAIL]\e[0m DigitalOcean Monitoring Agent detected.\n"
((FAIL++))
STATUS=2
if [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]]; then
echo "The agent can be removed with 'sudo yum remove do-agent' "
elif [[ $OS == "Ubuntu" ]]; then
echo "The agent can be removed with 'sudo apt-get purge do-agent' "
fi
else
echo -en "\e[32m[PASS]\e[0m DigitalOcean Monitoring agent was not found\n"
((PASS++))
fi
}
function checkLogs {
cp_ignore="/var/log/cpanel-install.log"
echo -en "\nChecking for log files in /var/log\n\n"
# Check if there are log archives or log files that have not been recently cleared.
for f in /var/log/*-????????; do
[[ -e $f ]] || break
if [ $f != $cp_ignore ]; then
echo -en "\e[93m[WARN]\e[0m Log archive ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
done
for f in /var/log/*.[0-9];do
[[ -e $f ]] || break
echo -en "\e[93m[WARN]\e[0m Log archive ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
done
for f in /var/log/*.log; do
[[ -e $f ]] || break
if [[ "${f}" = '/var/log/lfd.log' && "$( cat "${f}" | egrep -v '/var/log/messages has been reset| Watching /var/log/messages' | wc -c)" -gt 50 ]]; then
if [ $f != $cp_ignore ]; then
echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [[ "${f}" != '/var/log/lfd.log' && "$( cat "${f}" | wc -c)" -gt 50 ]]; then
if [ $f != $cp_ignore ]; then
echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
}
function checkTMP {
# Check the /tmp directory to ensure it is empty. Warn on any files found.
return 1
}
function checkRoot {
user="root"
uhome="/root"
for usr in $SHADOW
do
IFS=':' read -r -a u <<< "$usr"
if [[ "${u[0]}" == "${user}" ]]; then
if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n"
((FAIL++))
STATUS=2
fi
fi
done
if [ -d ${uhome}/ ]; then
if [ -d ${uhome}/.ssh/ ]; then
if ls ${uhome}/.ssh/*> /dev/null 2>&1; then
for key in ${uhome}/.ssh/*
do
if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
fi
elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 0 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
else
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory at \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
else
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a populated known_hosts file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
fi
if [ -f /root/.bash_history ];then
BH_S=$( cat /root/.bash_history | wc -c)
if [[ $BH_S -lt 200 ]]; then
echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
((FAIL++))
STATUS=2
fi
return 1;
else
echo -en "\e[32m[PASS]\e[0m The Root User's Bash History is not present\n"
((PASS++))
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
fi
echo -en "\n\n"
return 1
}
function checkUsers {
# Check each user-created account
for user in $(awk -F: '$3 >= 1000 && $1 != "nobody" {print $1}' /etc/passwd;)
do
# Skip some other non-user system accounts
if [[ $user == "centos" ]]; then
:
elif [[ $user == "nfsnobody" ]]; then
:
else
echo -en "\nChecking user: ${user}...\n"
for usr in $SHADOW
do
IFS=':' read -r -a u <<< "$usr"
if [[ "${u[0]}" == "${user}" ]]; then
if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. Only system users are allowed on the image.\n"
((FAIL++))
STATUS=2
fi
fi
done
#echo "User Found: ${user}"
uhome="/home/${user}"
if [ -d "${uhome}/" ]; then
if [ -d "${uhome}/.ssh/" ]; then
if ls "${uhome}/.ssh/*"> /dev/null 2>&1; then
for key in ${uhome}/.ssh/*
do
if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
fi
elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
if [ "$( cat "${key}" | wc -c)" -gt 0 ]; then
echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
akey=$(cat ${key})
echo "File Contents:"
echo $akey
echo "--------------"
((FAIL++))
STATUS=2
else
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory named \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
else
if [ "$( cat "${key}" | wc -c)" -gt 50 ]; then
echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a known_hosts file in \e[93m${key}\e[0m\n"
((WARN++))
if [[ $STATUS != 2 ]]; then
STATUS=1
fi
fi
fi
done
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
fi
else
echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
fi
# Check for an uncleared .bash_history for this user
if [ -f "${uhome}/.bash_history" ]; then
BH_S=$( cat "${uhome}/.bash_history" | wc -c )
if [[ $BH_S -lt 200 ]]; then
echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
((FAIL++))
STATUS=2
fi
echo -en "\n\n"
fi
fi
done
}
function checkFirewall {
if [[ $OS == "Ubuntu" ]]; then
fw="ufw"
ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
if [[ $ufwa == "active" ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
elif [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]]; then
if [ -f /usr/lib/systemd/system/csf.service ]; then
fw="csf"
if [[ $(systemctl status $fw >/dev/null 2>&1) ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
elif cmdExists "firewall-cmd"; then
if [[ $(systemctl is-active firewalld >/dev/null 2>&1 && echo 1 || echo 0) ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
else
fw="firewalld"
if [[ $(systemctl is-active firewalld >/dev/null 2>&1 && echo 1 || echo 0) ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
fi
elif [[ "$OS" =~ Debian.* ]]; then
# user could be using a number of different services for managing their firewall
# we will check some of the most common
if cmdExists 'ufw'; then
fw="ufw"
ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
if [[ $ufwa == "active" ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
elif cmdExists "firewall-cmd"; then
fw="firewalld"
if [[ $(systemctl is-active --quiet $fw) ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
else
# user could be using vanilla iptables, check if kernel module is loaded
fw="iptables"
if [[ $(lsmod | grep -q '^ip_tables' 2>/dev/null) ]]; then
FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
((PASS++))
else
FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
((WARN++))
fi
fi
fi
}
function checkUpdates {
if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
# Ensure /tmp exists and has the proper permissions before
# checking for security updates
# https://github.com/digitalocean/marketplace-partners/issues/94
if [[ ! -d /tmp ]]; then
mkdir /tmp
fi
chmod 1777 /tmp
echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n"
apt-get -y update > /dev/null
uc=$(apt-get --just-print upgrade | grep -i "security" | wc -l)
if [[ $uc -gt 0 ]]; then
update_count=$(( ${uc} / 2 ))
else
update_count=0
fi
if [[ $update_count -gt 0 ]]; then
echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
echo -en
echo -en "Here is a list of the security updates that are not installed:\n"
sleep 2
apt-get --just-print upgrade | grep -i security | awk '{print $2}' | awk '!seen[$0]++'
echo -en
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n"
fi
elif [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]]; then
echo -en "\nChecking for available security updates, this may take a minute...\n\n"
update_count=$(yum check-update --security --quiet | wc -l)
if [[ $update_count -gt 0 ]]; then
echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
((FAIL++))
STATUS=2
else
echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n"
((PASS++))
fi
else
echo "Error encountered"
exit 1
fi
return 1;
}
function checkCloudInit {
if hash cloud-init 2>/dev/null; then
CI="\e[32m[PASS]\e[0m Cloud-init is installed.\n"
((PASS++))
else
CI="\e[41m[FAIL]\e[0m No valid verison of cloud-init was found.\n"
((FAIL++))
STATUS=2
fi
return 1
}
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
clear
echo "DigitalOcean Marketplace Image Validation Tool ${VERSION}"
echo "Executed on: ${RUNDATE}"
echo "Checking local system for Marketplace compatibility..."
getDistro
echo -en "\n\e[1mDistribution:\e[0m ${OS}\n"
echo -en "\e[1mVersion:\e[0m ${VER}\n\n"
ost=0
osv=0
if [[ $OS == "Ubuntu" ]]; then
ost=1
if [[ $VER == "20.04" ]]; then
osv=1
elif [[ $VER == "18.04" ]]; then
osv=1
elif [[ $VER == "16.04" ]]; then
osv=1
else
osv=0
fi
elif [[ "$OS" =~ Debian.* ]]; then
ost=1
case "$VER" in
9)
osv=1
;;
10)
osv=1
;;
*)
osv=2
;;
esac
elif [[ $OS == "CentOS Linux" ]]; then
ost=1
if [[ $VER == "8" ]]; then
osv=1
elif [[ $VER == "7" ]]; then
osv=1
elif [[ $VER == "6" ]]; then
osv=1
else
osv=2
fi
elif [[ $OS == "CentOS Stream" ]]; then
ost=1
if [[ $VER == "8" ]]; then
osv=1
else
osv=2
fi
elif [[ $OS == "Rocky Linux" ]]; then
ost=1
if [[ $VER =~ "8." ]]; then
osv=1
else
osv=2
fi
else
ost=0
fi
if [[ $ost == 1 ]]; then
echo -en "\e[32m[PASS]\e[0m Supported Operating System Detected: ${OS}\n"
((PASS++))
else
echo -en "\e[41m[FAIL]\e[0m ${OS} is not a supported Operating System\n"
((FAIL++))
STATUS=2
fi
if [[ $osv == 1 ]]; then
echo -en "\e[32m[PASS]\e[0m Supported Release Detected: ${VER}\n"
((PASS++))
elif [[ $ost == 1 ]]; then
echo -en "\e[41m[FAIL]\e[0m ${OS} ${VER} is not a supported Operating System Version\n"
((FAIL++))
STATUS=2
else
echo "Exiting..."
exit 1
fi
checkCloudInit
echo -en "${CI}"
checkFirewall
echo -en "${FW_VER}"
checkUpdates
loadPasswords
checkLogs
echo -en "\n\nChecking all user-created accounts...\n"
checkUsers
echo -en "\n\nChecking the root account...\n"
checkRoot
checkAgent
# Summary
echo -en "\n\n---------------------------------------------------------------------------------------------------\n"
if [[ $STATUS == 0 ]]; then
echo -en "Scan Complete.\n\e[32mAll Tests Passed!\e[0m\n"
elif [[ $STATUS == 1 ]]; then
echo -en "Scan Complete. \n\e[93mSome non-critical tests failed. Please review these items.\e[0m\e[0m\n"
else
echo -en "Scan Complete. \n\e[41mOne or more tests failed. Please review these items and re-test.\e[0m\n"
fi
echo "---------------------------------------------------------------------------------------------------"
echo -en "\e[1m${PASS} Tests PASSED\e[0m\n"
echo -en "\e[1m${WARN} WARNINGS\e[0m\n"
echo -en "\e[1m${FAIL} Tests FAILED\e[0m\n"
echo -en "---------------------------------------------------------------------------------------------------\n"
if [[ $STATUS == 0 ]]; then
echo -en "We did not detect any issues with this image. Please be sure to manually ensure that all software installed on the base system is functional, secure and properly configured (or facilities for configuration on first-boot have been created).\n\n"
exit 0
elif [[ $STATUS == 1 ]]; then
echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n"
exit 0
else
echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n"
exit 1
fi

View File

@@ -0,0 +1,65 @@
{
"variables": {
"token": "{{env `DIGITALOCEAN_TOKEN`}}",
"image_name": "budibase-marketplace-snapshot-{{timestamp}}",
"apt_packages": "jq"
},
"builders": [
{
"type": "digitalocean",
"api_token": "{{user `token`}}",
"image": "docker-20-04",
"region": "lon1",
"size": "s-1vcpu-1gb",
"ssh_username": "root",
"snapshot_name": "{{user `image_name`}}"
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"cloud-init status --wait"
]
},
{
"type": "file",
"source": "files/etc/",
"destination": "/etc/"
},
{
"type": "file",
"source": "files/var/",
"destination": "/var/"
},
{
"type": "shell",
"environment_vars": [
"DEBIAN_FRONTEND=noninteractive",
"LC_ALL=C",
"LANG=en_US.UTF-8",
"LC_CTYPE=en_US.UTF-8"
],
"inline": [
"apt -qqy update",
"apt -qqy -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' full-upgrade",
"apt -qqy -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' install {{user `apt_packages`}}"
]
},
{
"type": "shell",
"environment_vars": [
"application_name={{user `application_name`}}",
"application_version={{user `application_version`}}",
"DEBIAN_FRONTEND=noninteractive",
"LC_ALL=C",
"LANG=en_US.UTF-8",
"LC_CTYPE=en_US.UTF-8"
],
"scripts": [
"scripts/90-cleanup.sh",
"scripts/99-img_check.sh"
]
}
]
}
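As a usage note, anything in the `variables` block above can be overridden at build time with `-var`; for instance (a sketch — `curl` is an arbitrary extra package chosen for illustration):
```bash
packer build -var 'apt_packages=jq curl' template.json
```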

View File

@@ -1,35 +1,18 @@
 apiVersion: v2
 name: budibase
-description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes.
+description: >-
+  Budibase is an open source low-code platform, helping thousands of teams build
+  apps for their workplace in minutes.
 keywords:
 - low-code
 - database
 - cluster
 sources:
 - https://github.com/Budibase/budibase
 - https://budibase.com
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
 type: application
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.2
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-# It is recommended to use it with quotes.
-appVersion: "0.9.169"
+version: 0.2.4
+appVersion: 1.0.6
 dependencies:
 - name: couchdb
   version: 3.3.4

Binary file not shown.

View File

@@ -1,19 +0,0 @@
apiVersion: v1
appVersion: 3.1.0
description: A database featuring seamless multi-master sync, that scales from big
data to mobile, with an intuitive HTTP/JSON API and designed for reliability.
home: https://couchdb.apache.org/
icon: http://couchdb.apache.org/CouchDB-visual-identity/logo/CouchDB-couch-symbol.svg
keywords:
- couchdb
- database
- nosql
maintainers:
- email: kocolosk@apache.org
name: kocolosk
- email: willholley@apache.org
name: willholley
name: couchdb
sources:
- https://github.com/apache/couchdb-docker
version: 3.3.4

View File

@@ -1,244 +0,0 @@
# CouchDB
Apache CouchDB is a database featuring seamless multi-master sync, that scales
from big data to mobile, with an intuitive HTTP/JSON API and designed for
reliability.
This chart deploys a CouchDB cluster as a StatefulSet. It creates a ClusterIP
Service in front of the StatefulSet for load balancing by default, but can also
be configured to deploy other Service types or an Ingress Controller. The
default persistence mechanism is simply the ephemeral local filesystem, but
production deployments should set `persistentVolume.enabled` to `true` to attach
storage volumes to each Pod in the StatefulSet.
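For instance, a production-leaning install might enable persistent volumes like this (a sketch reusing the placeholder UUID from below; the volume size is an arbitrary example):
```bash
helm install couchdb/couchdb \
  --set persistentVolume.enabled=true \
  --set persistentVolume.size=100Gi \
  --set couchdbConfig.couchdb.uuid=decafbaddecafbaddecafbaddecafbad
```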
## TL;DR
```bash
$ helm repo add couchdb https://apache.github.io/couchdb-helm
$ helm install couchdb/couchdb \
--set allowAdminParty=true \
--set couchdbConfig.couchdb.uuid=$(curl https://www.uuidgenerator.net/api/version4 2>/dev/null | tr -d -)
```
## Prerequisites
- Kubernetes 1.9+ with Beta APIs enabled
- Ingress requires Kubernetes 1.14+
## Installing the Chart
To install the chart with the release name `my-release`:
Add the CouchDB Helm repository:
```bash
$ helm repo add couchdb https://apache.github.io/couchdb-helm
```
Afterwards install the chart replacing the UUID
`decafbaddecafbaddecafbaddecafbad` with a custom one:
```bash
$ helm install \
--name my-release \
--set couchdbConfig.couchdb.uuid=decafbaddecafbaddecafbaddecafbad \
couchdb/couchdb
```
This will create a Secret containing the admin credentials for the cluster.
Those credentials can be retrieved as follows:
```bash
$ kubectl get secret my-release-couchdb -o go-template='{{ .data.adminPassword }}' | base64 --decode
```
If you prefer to configure the admin credentials directly you can create a
Secret containing `adminUsername`, `adminPassword` and `cookieAuthSecret` keys:
```bash
$ kubectl create secret generic my-release-couchdb --from-literal=adminUsername=foo --from-literal=adminPassword=bar --from-literal=cookieAuthSecret=baz
```
If you want to set the `adminHash` directly to achieve consistent salts between
different nodes, you need to additionally add the key `password.ini` to the secret:
```bash
$ kubectl create secret generic my-release-couchdb \
--from-literal=adminUsername=foo \
--from-literal=cookieAuthSecret=baz \
--from-file=./my-password.ini
```
With the following contents in `my-password.ini`:
```
[admins]
foo = <pbkdf2-hash>
```
and then install the chart while overriding the `createAdminSecret` setting:
```bash
$ helm install \
--name my-release \
--set createAdminSecret=false \
--set couchdbConfig.couchdb.uuid=decafbaddecafbaddecafbaddecafbad \
couchdb/couchdb
```
This Helm chart deploys CouchDB on the Kubernetes cluster in a default
configuration. The [configuration](#configuration) section lists
the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` Deployment:
```bash
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and
deletes the release.
## Upgrading an existing Release to a new major version
A major chart version change (like v0.2.3 -> v1.0.0) indicates that there is an
incompatible breaking change needing manual actions.
### Upgrade to 3.0.0
Since version 3.0.0 setting the CouchDB server instance UUID is mandatory.
Therefore you need to generate a UUID and supply it as a value during the
upgrade as follows:
```bash
$ helm upgrade <release-name> \
--reuse-values \
--set couchdbConfig.couchdb.uuid=<UUID> \
couchdb/couchdb
```
## Migrating from stable/couchdb
This chart replaces the `stable/couchdb` chart previously hosted by Helm and continues the
version semantics. You can upgrade directly from `stable/couchdb` to this chart using:
```bash
$ helm repo add couchdb https://apache.github.io/couchdb-helm
$ helm upgrade my-release couchdb/couchdb
```
## Configuration
The following table lists the most commonly configured parameters of the
CouchDB chart and their default values:
| Parameter                       | Description                                              | Default                             |
|---------------------------------|----------------------------------------------------------|-------------------------------------|
| `clusterSize`                   | The initial number of nodes in the CouchDB cluster       | 3                                   |
| `couchdbConfig`                 | Map allowing override elements of server .ini config     | *See below*                         |
| `allowAdminParty`               | If enabled, start cluster without admin account          | false (requires creating a Secret)  |
| `createAdminSecret`             | If enabled, create an admin account and cookie secret    | true                                |
| `schedulerName`                 | Name of the k8s scheduler (other than default)           | `nil`                               |
| `erlangFlags`                   | Map of flags supplied to the underlying Erlang VM        | name: couchdb, setcookie: monster   |
| `persistentVolume.enabled`      | Boolean determining whether to attach a PV to each node  | false                               |
| `persistentVolume.size`         | If enabled, the size of the persistent volume to attach  | 10Gi                                |
| `enableSearch`                  | Adds a sidecar for Lucene-powered text search            | false                               |
You can set the values of the `couchdbConfig` map according to the
[official configuration][4]. The following shows the map's default values and
required options to set:
| Parameter | Description | Default |
|---------------------------------|--------------------------------------------------------------------|----------------------------------------|
| `couchdb.uuid` | UUID for this CouchDB server instance ([Required in a cluster][5]) | |
| `chttpd.bind_address` | listens on all interfaces when set to any | any |
| `chttpd.require_valid_user` | disables all the anonymous requests to the port 5984 when true | false |
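As a concrete sketch of such an override (reusing the placeholder UUID from above; `require_valid_user` is flipped on purely for illustration):
```bash
cat > my-values.yaml <<'EOF'
couchdbConfig:
  couchdb:
    uuid: decafbaddecafbaddecafbaddecafbad
  chttpd:
    require_valid_user: true
EOF
helm install --name my-release -f my-values.yaml couchdb/couchdb
```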
A variety of other parameters are also configurable. See the comments in the
`values.yaml` file for further details:
| Parameter | Default |
|--------------------------------------|----------------------------------------|
| `adminUsername` | admin |
| `adminPassword` | auto-generated |
| `adminHash` | |
| `cookieAuthSecret` | auto-generated |
| `image.repository` | couchdb |
| `image.tag` | 3.1.0 |
| `image.pullPolicy` | IfNotPresent |
| `searchImage.repository` | kocolosk/couchdb-search |
| `searchImage.tag` | 0.1.0 |
| `searchImage.pullPolicy` | IfNotPresent |
| `initImage.repository` | busybox |
| `initImage.tag` | latest |
| `initImage.pullPolicy` | Always |
| `ingress.enabled` | false |
| `ingress.hosts` | chart-example.local |
| `ingress.annotations` | |
| `ingress.path` | / |
| `ingress.tls` | |
| `persistentVolume.accessModes` | ReadWriteOnce |
| `persistentVolume.storageClass` | Default for the Kube cluster |
| `podManagementPolicy` | Parallel |
| `affinity` | |
| `annotations` | |
| `tolerations` | |
| `resources` | |
| `service.annotations` | |
| `service.enabled` | true |
| `service.type` | ClusterIP |
| `service.externalPort` | 5984 |
| `dns.clusterDomainSuffix` | cluster.local |
| `networkPolicy.enabled` | true |
| `serviceAccount.enabled` | true |
| `serviceAccount.create` | true |
| `serviceAccount.imagePullSecrets` | |
| `sidecars` | {} |
| `livenessProbe.enabled` | true |
| `livenessProbe.failureThreshold` | 3 |
| `livenessProbe.initialDelaySeconds` | 0 |
| `livenessProbe.periodSeconds` | 10 |
| `livenessProbe.successThreshold` | 1 |
| `livenessProbe.timeoutSeconds` | 1 |
| `readinessProbe.enabled` | true |
| `readinessProbe.failureThreshold` | 3 |
| `readinessProbe.initialDelaySeconds` | 0 |
| `readinessProbe.periodSeconds` | 10 |
| `readinessProbe.successThreshold` | 1 |
| `readinessProbe.timeoutSeconds` | 1 |
## Feedback, Issues, Contributing
General feedback is welcome at our [user][1] or [developer][2] mailing lists.
Apache CouchDB has a [CONTRIBUTING][3] file with details on how to get started
with issue reporting or contributing to the upkeep of this project. In short,
use GitHub Issues, do not report anything on Docker's website.
## Non-Apache CouchDB Development Team Contributors
- [@natarajaya](https://github.com/natarajaya)
- [@satchpx](https://github.com/satchpx)
- [@spanato](https://github.com/spanato)
- [@jpds](https://github.com/jpds)
- [@sebastien-prudhomme](https://github.com/sebastien-prudhomme)
- [@stepanstipl](https://github.com/stepanstipl)
- [@amatas](https://github.com/amatas)
- [@Chimney42](https://github.com/Chimney42)
- [@mattjmcnaughton](https://github.com/mattjmcnaughton)
- [@mainephd](https://github.com/mainephd)
- [@AdamDang](https://github.com/AdamDang)
- [@mrtyler](https://github.com/mrtyler)
- [@kevinwlau](https://github.com/kevinwlau)
- [@jeyenzo](https://github.com/jeyenzo)
- [@Pinpin31](https://github.com/Pinpin31)
[1]: http://mail-archives.apache.org/mod_mbox/couchdb-user/
[2]: http://mail-archives.apache.org/mod_mbox/couchdb-dev/
[3]: https://github.com/apache/couchdb/blob/master/CONTRIBUTING.md
[4]: https://docs.couchdb.org/en/stable/config/index.html
[5]: https://docs.couchdb.org/en/latest/setup/cluster.html#preparing-couchdb-nodes-to-be-joined-into-a-cluster

View File

@@ -1,3 +0,0 @@
couchdbConfig:
couchdb:
uuid: "decafbaddecafbaddecafbaddecafbad"

View File

@@ -1,9 +0,0 @@
sidecars:
- name: foo
image: "busybox"
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "0.1"
memory: 10Mi
command: ['while true; do echo "foo"; sleep 5; done;']

View File

@@ -1,2 +0,0 @@
[admins]
{{ .Values.adminUsername }} = {{ .Values.adminHash }}

View File

@@ -1,20 +0,0 @@
Apache CouchDB is starting. Check the status of the Pods using:
kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "couchdb.name" . }},release={{ .Release.Name }}"
Once all of the Pods are fully Ready, execute the following command to create
some required system databases:
kubectl exec --namespace {{ .Release.Namespace }} {{ if not .Values.allowAdminParty }}-it {{ end }}{{ template "couchdb.fullname" . }}-0 -c couchdb -- \
curl -s \
http://127.0.0.1:5984/_cluster_setup \
-X POST \
-H "Content-Type: application/json" \
{{- if .Values.allowAdminParty }}
-d '{"action": "finish_cluster"}'
{{- else }}
-d '{"action": "finish_cluster"}' \
-u <adminUsername>
{{- end }}
Then it's time to relax.

View File

@@ -1,81 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "couchdb.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "couchdb.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- printf "%s-%s" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
In the event that we create both a headless service and a traditional one,
ensure that the latter gets a unique name.
*/}}
{{- define "couchdb.svcname" -}}
{{- if .Values.fullnameOverride -}}
{{- printf "%s-svc-%s" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-svc-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{/*
Create a random string if the supplied key does not exist
*/}}
{{- define "couchdb.defaultsecret" -}}
{{- if . -}}
{{- . | b64enc | quote -}}
{{- else -}}
{{- randAlphaNum 20 | b64enc | quote -}}
{{- end -}}
{{- end -}}
{{/*
Labels used to define Pods in the CouchDB statefulset
*/}}
{{- define "couchdb.ss.selector" -}}
app: {{ template "couchdb.name" . }}
release: {{ .Release.Name }}
{{- end -}}
{{/*
Generates a comma delimited list of nodes in the cluster
*/}}
{{- define "couchdb.seedlist" -}}
{{- $nodeCount := min 5 .Values.clusterSize | int }}
{{- range $index0 := until $nodeCount -}}
{{- $index1 := $index0 | add1 -}}
{{ $.Values.erlangFlags.name }}@{{ template "couchdb.fullname" $ }}-{{ $index0 }}.{{ template "couchdb.fullname" $ }}.{{ $.Release.Namespace }}.svc.{{ $.Values.dns.clusterDomainSuffix }}{{ if ne $index1 $nodeCount }},{{ end }}
{{- end -}}
{{- end -}}
{{/*
If serviceAccount.name is specified, use that, else use the couchdb instance name
*/}}
{{- define "couchdb.serviceAccount" -}}
{{- if .Values.serviceAccount.name -}}
{{- .Values.serviceAccount.name }}
{{- else -}}
{{- template "couchdb.fullname" . -}}
{{- end -}}
{{- end -}}
{{/*
Fail if couchdbConfig.couchdb.uuid is undefined
*/}}
{{- define "couchdb.uuid" -}}
{{- required "A value for couchdbConfig.couchdb.uuid must be set" (.Values.couchdbConfig.couchdb | default dict).uuid -}}
{{- end -}}

View File

@@ -1,23 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "couchdb.fullname" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
data:
inifile: |
{{ $couchdbConfig := dict "couchdb" (dict "uuid" (include "couchdb.uuid" .)) -}}
{{- $couchdbConfig := merge $couchdbConfig .Values.couchdbConfig -}}
{{- range $section, $settings := $couchdbConfig -}}
{{ printf "[%s]" $section }}
{{ range $key, $value := $settings -}}
{{ printf "%s = %s" $key ($value | toString) }}
{{ end }}
{{ end }}
seedlistinifile: |
[cluster]
seedlist = {{ template "couchdb.seedlist" . }}

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "couchdb.fullname" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: couchdb
port: 5984
selector:
{{ include "couchdb.ss.selector" . | indent 4 }}

View File

@@ -1,33 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "couchdb.fullname" . -}}
{{- $servicePort := .Values.service.externalPort -}}
{{- $path := .Values.ingress.path | quote -}}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ template "couchdb.fullname" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
annotations:
{{- range $key, $value := .Values.ingress.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
rules:
{{- range $host := .Values.ingress.hosts }}
- host: {{ $host }}
http:
paths:
- path: {{ $path }}
backend:
serviceName: {{ $serviceName }}
servicePort: {{ $servicePort }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{ toYaml .Values.ingress.tls | indent 4 }}
{{- end -}}
{{- end -}}

View File

@@ -1,31 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: {{ template "couchdb.fullname" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
podSelector:
matchLabels:
{{ include "couchdb.ss.selector" . | indent 6 }}
ingress:
- ports:
- protocol: TCP
port: 5984
- ports:
- protocol: TCP
port: 9100
- protocol: TCP
port: 4369
from:
- podSelector:
matchLabels:
{{ include "couchdb.ss.selector" . | indent 14 }}
policyTypes:
- Ingress
{{- end }}

View File

@@ -1,19 +0,0 @@
{{- if .Values.createAdminSecret -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "couchdb.fullname" . }}
labels:
app: {{ template "couchdb.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
type: Opaque
data:
adminUsername: {{ template "couchdb.defaultsecret" .Values.adminUsername }}
adminPassword: {{ template "couchdb.defaultsecret" .Values.adminPassword }}
cookieAuthSecret: {{ template "couchdb.defaultsecret" .Values.cookieAuthSecret }}
{{- if .Values.adminHash }}
password.ini: {{ tpl (.Files.Get "password.ini") . | b64enc }}
{{- end -}}
{{- end -}}

View File

@@ -1,23 +0,0 @@
{{- if .Values.service.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "couchdb.svcname" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
spec:
ports:
- port: {{ .Values.service.externalPort }}
protocol: TCP
targetPort: 5984
type: {{ .Values.service.type }}
selector:
{{ include "couchdb.ss.selector" . | indent 4 }}
{{- end -}}

View File

@@ -1,15 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "couchdb.serviceAccount" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- if .Values.serviceAccount.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.serviceAccount.imagePullSecrets }}
{{- end }}
{{- end }}

View File

@@ -1,202 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "couchdb.fullname" . }}
labels:
app: {{ template "couchdb.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.clusterSize }}
serviceName: {{ template "couchdb.fullname" . }}
podManagementPolicy: {{ .Values.podManagementPolicy }}
selector:
matchLabels:
{{ include "couchdb.ss.selector" . | indent 6 }}
template:
metadata:
labels:
{{ include "couchdb.ss.selector" . | indent 8 }}
{{- with .Values.annotations }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- if .Values.serviceAccount.enabled }}
serviceAccountName: {{ template "couchdb.serviceAccount" . }}
{{- end }}
initContainers:
- name: init-copy
image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
imagePullPolicy: {{ .Values.initImage.pullPolicy }}
command: ['sh','-c','cp /tmp/chart.ini /default.d; cp /tmp/seedlist.ini /default.d; ls -lrt /default.d;']
volumeMounts:
- name: config
mountPath: /tmp/
- name: config-storage
mountPath: /default.d
{{- if .Values.adminHash }}
- name: admin-hash-copy
image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
imagePullPolicy: {{ .Values.initImage.pullPolicy }}
command: ['sh','-c','cp /tmp/password.ini /local.d/ ;']
volumeMounts:
- name: admin-password
mountPath: /tmp/password.ini
subPath: "password.ini"
- name: local-config-storage
mountPath: /local.d
{{- end }}
containers:
- name: couchdb
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: couchdb
containerPort: 5984
- name: epmd
containerPort: 4369
- containerPort: 9100
env:
{{- if not .Values.allowAdminParty }}
- name: COUCHDB_USER
valueFrom:
secretKeyRef:
name: {{ template "couchdb.fullname" . }}
key: adminUsername
- name: COUCHDB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "couchdb.fullname" . }}
key: adminPassword
- name: COUCHDB_SECRET
valueFrom:
secretKeyRef:
name: {{ template "couchdb.fullname" . }}
key: cookieAuthSecret
{{- end }}
- name: ERL_FLAGS
value: "{{ range $k, $v := .Values.erlangFlags }} -{{ $k }} {{ $v }} {{ end }}"
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
{{- if .Values.couchdbConfig.chttpd.require_valid_user }}
exec:
command:
- sh
- -c
- curl -G --silent --fail -u ${COUCHDB_USER}:${COUCHDB_PASSWORD} http://localhost:5984/_up
{{- else }}
httpGet:
path: /_up
port: 5984
{{- end }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
{{- if .Values.couchdbConfig.chttpd.require_valid_user }}
exec:
command:
- sh
- -c
- curl -G --silent --fail -u ${COUCHDB_USER}:${COUCHDB_PASSWORD} http://localhost:5984/_up
{{- else }}
httpGet:
path: /_up
port: 5984
{{- end }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
volumeMounts:
- name: config-storage
mountPath: /opt/couchdb/etc/default.d
{{- if .Values.adminHash }}
- name: local-config-storage
mountPath: /opt/couchdb/etc/local.d
{{- end }}
- name: database-storage
mountPath: /opt/couchdb/data
{{- if .Values.enableSearch }}
- name: clouseau
image: "{{ .Values.searchImage.repository }}:{{ .Values.searchImage.tag }}"
imagePullPolicy: {{ .Values.searchImage.pullPolicy }}
volumeMounts:
- name: database-storage
mountPath: /opt/couchdb-search/data
{{- end }}
{{- if .Values.sidecars }}
{{ toYaml .Values.sidecars | indent 8}}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: config-storage
emptyDir: {}
- name: config
configMap:
name: {{ template "couchdb.fullname" . }}
items:
- key: inifile
path: chart.ini
- key: seedlistinifile
path: seedlist.ini
{{- if .Values.adminHash }}
- name: local-config-storage
emptyDir: {}
- name: admin-password
secret:
secretName: {{ template "couchdb.fullname" . }}
{{- end -}}
{{- if not .Values.persistentVolume.enabled }}
- name: database-storage
emptyDir: {}
{{- else }}
volumeClaimTemplates:
- metadata:
name: database-storage
labels:
app: {{ template "couchdb.name" . }}
release: {{ .Release.Name }}
spec:
accessModes:
{{- range .Values.persistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistentVolume.size | quote }}
{{- if .Values.persistentVolume.storageClass }}
{{- if (eq "-" .Values.persistentVolume.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistentVolume.storageClass }}"
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,201 +0,0 @@
## clusterSize is the initial size of the CouchDB cluster.
clusterSize: 3
## If allowAdminParty is enabled the cluster will start up without any database
## administrator account; i.e., all users will be granted administrative
## access. Otherwise, the system will look for a Secret called
## <ReleaseName>-couchdb containing `adminUsername`, `adminPassword` and
## `cookieAuthSecret` keys. See the `createAdminSecret` flag.
## ref: https://kubernetes.io/docs/concepts/configuration/secret/
allowAdminParty: false
## If createAdminSecret is enabled a Secret called <ReleaseName>-couchdb will
## be created containing auto-generated credentials. Users who prefer to set
## these values themselves have a couple of options:
##
## 1) The `adminUsername`, `adminPassword`, `adminHash`, and `cookieAuthSecret`
## can be defined directly in the chart's values. Note that all of a chart's
## values are currently stored in plaintext in a ConfigMap in the tiller
## namespace.
##
## 2) This flag can be disabled and a Secret with the required keys can be
## created ahead of time.
createAdminSecret: true
# adminUsername: budibase
# adminPassword: budibase
# adminHash: -pbkdf2-this_is_not_necessarily_secure_either
# cookieAuthSecret: admin
## When enabled, will deploy a networkpolicy that allows CouchDB pods to
## communicate with each other for clustering and ingress on port 5984
networkPolicy:
enabled: true
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
# Use a service account
serviceAccount:
enabled: true
create: true
# name:
# imagePullSecrets:
# - name: myimagepullsecret
## The storage volume used by each Pod in the StatefulSet. If a
## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral
## local storage. Setting the storageClass attribute to "-" disables dynamic
## provisioning of Persistent Volumes; leaving it unset will invoke the default
## provisioner.
persistentVolume:
enabled: false
accessModes:
- ReadWriteOnce
size: 10Gi
storageClass: ""
## The CouchDB image
image:
repository: couchdb
tag: 3.1.0
pullPolicy: IfNotPresent
## Experimental integration with Lucene-powered fulltext search
searchImage:
repository: kocolosk/couchdb-search
tag: 0.2.0
pullPolicy: IfNotPresent
## Flip this to flag to include the Search container in each Pod
enableSearch: true
initImage:
repository: busybox
tag: latest
pullPolicy: Always
## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter
## problems you can try setting podManagementPolicy to the StatefulSet default
## `OrderedReady`
podManagementPolicy: Parallel
## To better tolerate Node failures, we can prevent Kubernetes scheduler from
## assigning more than one Pod of CouchDB StatefulSet per Node using podAntiAffinity.
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: "app"
# operator: In
# values:
# - couchdb
# topologyKey: "kubernetes.io/hostname"
## Optional pod annotations
annotations: {}
## Optional tolerations
tolerations: []
## A StatefulSet requires a headless Service to establish the stable network
## identities of the Pods, and that Service is created automatically by this
## chart without any additional configuration. The Service block below refers
## to a second Service that governs how clients connect to the CouchDB cluster.
service:
# annotations:
enabled: true
type: ClusterIP
externalPort: 5984
## An Ingress resource can provide name-based virtual hosting and TLS
## termination among other things for CouchDB deployments which are accessed
## from outside the Kubernetes cluster.
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
hosts:
- chart-example.local
path: /
annotations: []
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Optional resource requests and limits for the CouchDB container
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
{}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# cpu: 56
# memory: 256Gi
## erlangFlags is a map that is passed to the Erlang VM as flags using the
## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to
## establish connectivity between cluster nodes.
## ref: http://erlang.org/doc/man/erl.html#init_flags
erlangFlags:
name: couchdb
setcookie: monster
## couchdbConfig will override default CouchDB configuration settings.
## The contents of this map are reformatted into a .ini file laid down
## by a ConfigMap object.
## ref: http://docs.couchdb.org/en/latest/config/index.html
couchdbConfig:
couchdb:
uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance
# cluster:
# q: 8 # Create 8 shards for each database
chttpd:
bind_address: any
# chttpd.require_valid_user disables all the anonymous requests to the port
# 5984 when it is set to true.
require_valid_user: false
# Kubernetes local cluster domain.
# This is used to generate FQDNs for peers when joining the CouchDB cluster.
dns:
clusterDomainSuffix: cluster.local
## Configure liveness and readiness probe values
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# Configure arbitrary sidecar containers for CouchDB pods created by the
# StatefulSet
sidecars: {}
# - name: foo
# image: "busybox"
# imagePullPolicy: IfNotPresent
# resources:
# requests:
# cpu: "0.1"
# memory: 10Mi
# command: ['echo "foo";']
# volumeMounts:
# - name: database-storage
# mountPath: /opt/couchdb/data/
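For what it's worth, feeding the default `erlangFlags` above through the `ERL_FLAGS` template in the StatefulSet renders roughly as follows (whitespace approximate):
```bash
ERL_FLAGS=" -name couchdb  -setcookie monster "
```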

View File

@@ -73,17 +73,13 @@ spec:
                  name: {{ template "budibase.fullname" . }}
                  key: objectStoreSecret
            - name: MINIO_URL
-             {{ if .Values.services.objectStore.url }}
              value: {{ .Values.services.objectStore.url }}
-             {{ else }}
-             value: http://minio-service:{{ .Values.services.objectStore.port }}
-             {{ end }}
            - name: PORT
              value: {{ .Values.services.apps.port | quote }}
            - name: MULTI_TENANCY
              value: {{ .Values.globals.multiTenancy | quote }}
            - name: LOG_LEVEL
-             value: {{ .Values.services.apps.logLevel | quote }}
+             value: {{ default "info" .Values.services.apps.logLevel | quote }}
            - name: REDIS_PASSWORD
              value: {{ .Values.services.redis.password }}
            - name: REDIS_URL
@@ -110,7 +106,7 @@ spec:
              value: {{ .Values.globals.accountPortalApiKey | quote }}
            - name: COOKIE_DOMAIN
              value: {{ .Values.globals.cookieDomain | quote }}
-         image: budibase/apps
+         image: budibase/apps:{{ .Chart.appVersion }}
          imagePullPolicy: Always
          name: bbapps
          ports:
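
Note, for illustration: with Helm's `default` function, an unset
services.apps.logLevel now renders as value: "info" rather than an empty
string; an explicitly configured log level still takes precedence.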

View File

@@ -0,0 +1,43 @@
{{- if .Values.services.couchdb.backup.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    kompose.cmd: kompose convert
    kompose.version: 1.21.0 (992df58d8)
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: couchdb-backup
  name: couchdb-backup
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: couchdb-backup
  strategy:
    type: Recreate
  template:
    metadata:
      annotations:
        kompose.cmd: kompose convert
        kompose.version: 1.21.0 (992df58d8)
      creationTimestamp: null
      labels:
        app.kubernetes.io/name: couchdb-backup
    spec:
      containers:
        - env:
            - name: SOURCE
              value: {{ .Values.services.couchdb.url }}
            - name: TARGET
              value: {{ .Values.services.couchdb.backup.target }}
            - name: RUN_EVERY_SECS
              value: {{ .Values.services.couchdb.backup.interval }}
            - name: VERBOSE
              value: "true"
          image: redgeoff/replicate-couchdb-cluster
          imagePullPolicy: Always
          name: couchdb-backup
          resources: {}
status: {}
{{- end }}
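
For illustration (the target URL and interval below are hypothetical, not part
of this commit): the Deployment above only renders when the backup values are
populated, e.g.

  services:
    couchdb:
      backup:
        enabled: true
        target: "http://couchdb-replica:5984"
        interval: "3600"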

View File

@@ -70,17 +70,13 @@ spec:
                  name: {{ template "budibase.fullname" . }}
                  key: objectStoreSecret
            - name: MINIO_URL
-             {{ if .Values.services.objectStore.url }}
              value: {{ .Values.services.objectStore.url }}
-             {{ else }}
-             value: http://minio-service:{{ .Values.services.objectStore.port }}
-             {{ end }}
            - name: PORT
              value: {{ .Values.services.worker.port | quote }}
            - name: MULTI_TENANCY
              value: {{ .Values.globals.multiTenancy | quote }}
            - name: LOG_LEVEL
-             value: {{ .Values.services.worker.logLevel | quote }}
+             value: {{ default "info" .Values.services.worker.logLevel | quote }}
            - name: REDIS_PASSWORD
              value: {{ .Values.services.redis.password | quote }}
            - name: REDIS_URL
@@ -115,7 +111,7 @@ spec:
              value: {{ .Values.globals.smtp.from | quote }}
            - name: APPS_URL
              value: http://app-service:{{ .Values.services.apps.port }}
-         image: budibase/worker
+         image: budibase/worker:{{ .Chart.appVersion }}
          imagePullPolicy: Always
          name: bbworker
          ports:

View File

@@ -2,8 +2,6 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
-replicaCount: 1
-
 image:
   pullPolicy: IfNotPresent
   # Overrides the image tag whose default is the chart appVersion.
@@ -109,6 +107,7 @@ globals:
   enabled: false

 services:
+  budibaseVersion: latest
   dns: cluster.local

   proxy:
@@ -126,12 +125,16 @@ services:

   couchdb:
     enabled: true
-    replicaCount: 3
     # url: "" # only change if pointing to existing couch server
     # user: "" # only change if pointing to existing couch server
     # password: "" # only change if pointing to existing couch server
     port: 5984
-    storage: 100Mi
+    backup:
+      enabled: false
+      # target couchDB instance to back up to
+      target: ""
+      # backup interval in seconds
+      interval: ""

   redis:
     enabled: true # disable if using external redis
@@ -149,5 +152,153 @@ services:
     accessKey: "" # AWS_ACCESS_KEY if using S3 or existing minio access key
     secretKey: "" # AWS_SECRET_ACCESS_KEY if using S3 or existing minio secret
     region: "" # AWS_REGION if using S3 or existing minio secret
-    url: "" # only change if pointing to existing minio cluster and minio: false
+    url: "http://minio-service:9000" # only change if pointing to existing minio cluster or S3 and minio: false
     storage: 100Mi
+
+# Override values in couchDB subchart
+couchdb:
+  ## clusterSize is the initial size of the CouchDB cluster.
+  clusterSize: 3
+  allowAdminParty: false
+
+  # Secret Management
+  createAdminSecret: true
+  # adminUsername: budibase
+  # adminPassword: budibase
+  # adminHash: -pbkdf2-this_is_not_necessarily_secure_either
+  # cookieAuthSecret: admin
+
+  ## When enabled, will deploy a networkpolicy that allows CouchDB pods to
+  ## communicate with each other for clustering and ingress on port 5984
+  networkPolicy:
+    enabled: true
+
+  # Use a service account
+  serviceAccount:
+    enabled: true
+    create: true
+  # name:
+  # imagePullSecrets:
+  # - name: myimagepullsecret
+
+  ## The storage volume used by each Pod in the StatefulSet. If a
+  ## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral
+  ## local storage. Setting the storageClass attribute to "-" disables dynamic
+  ## provisioning of Persistent Volumes; leaving it unset will invoke the default
+  ## provisioner.
+  persistentVolume:
+    enabled: false
+    accessModes:
+      - ReadWriteOnce
+    size: 10Gi
+    storageClass: ""
+
+  ## The CouchDB image
+  image:
+    repository: couchdb
+    tag: 3.1.0
+    pullPolicy: IfNotPresent
+
+  ## Experimental integration with Lucene-powered fulltext search
+  enableSearch: true
+  searchImage:
+    repository: kocolosk/couchdb-search
+    tag: 0.2.0
+    pullPolicy: IfNotPresent
+
+  initImage:
+    repository: busybox
+    tag: latest
+    pullPolicy: Always
+
+  ## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter
+  ## problems you can try setting podManagementPolicy to the StatefulSet default
+  ## `OrderedReady`
+  podManagementPolicy: Parallel
+
+  ## Optional pod annotations
+  annotations: {}
+
+  ## Optional tolerations
+  tolerations: []
+
+  service:
+    # annotations:
+    enabled: true
+    type: ClusterIP
+    externalPort: 5984
+
+  ## An Ingress resource can provide name-based virtual hosting and TLS
+  ## termination among other things for CouchDB deployments which are accessed
+  ## from outside the Kubernetes cluster.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
+  ingress:
+    enabled: false
+    hosts:
+      - chart-example.local
+    path: /
+    annotations: []
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+    tls:
+      # Secrets must be manually created in the namespace.
+      # - secretName: chart-example-tls
+      #   hosts:
+      #     - chart-example.local
+
+  ## Optional resource requests and limits for the CouchDB container
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  resources:
+    {}
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # limits:
+    #   cpu: 56
+    #   memory: 256Gi
+
+  ## erlangFlags is a map that is passed to the Erlang VM as flags using the
+  ## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to
+  ## establish connectivity between cluster nodes.
+  ## ref: http://erlang.org/doc/man/erl.html#init_flags
+  erlangFlags:
+    name: couchdb
+    setcookie: monster
+
+  ## couchdbConfig will override default CouchDB configuration settings.
+  ## The contents of this map are reformatted into a .ini file laid down
+  ## by a ConfigMap object.
+  ## ref: http://docs.couchdb.org/en/latest/config/index.html
+  couchdbConfig:
+    couchdb:
+      uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance
+    # cluster:
+    #   q: 8 # Create 8 shards for each database
+    chttpd:
+      bind_address: any
+      # When set to true, chttpd.require_valid_user disables all anonymous
+      # requests to port 5984.
+      require_valid_user: false
+
+  # Kubernetes local cluster domain.
+  # This is used to generate FQDNs for peers when joining the CouchDB cluster.
+  dns:
+    clusterDomainSuffix: cluster.local
+
+  ## Configure liveness and readiness probe values
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  livenessProbe:
+    enabled: true
+    failureThreshold: 3
+    initialDelaySeconds: 0
+    periodSeconds: 10
+    successThreshold: 1
+    timeoutSeconds: 1
+  readinessProbe:
+    enabled: true
+    failureThreshold: 3
+    initialDelaySeconds: 0
+    periodSeconds: 10
+    successThreshold: 1
+    timeoutSeconds: 1
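
For illustration (the flag values are hypothetical): any of the couchdb
subchart overrides above can equally be supplied at install time, e.g.

  helm install budibase ./hosting/kubernetes/budibase \
    --set couchdb.clusterSize=5 \
    --set couchdb.persistentVolume.enabled=true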

View File

@@ -8,6 +8,7 @@
     "eslint-plugin-cypress": "^2.11.3",
     "eslint-plugin-svelte3": "^3.2.0",
     "husky": "^7.0.1",
+    "js-yaml": "^4.1.0",
     "kill-port": "^1.6.1",
     "lerna": "3.14.1",
     "prettier": "^2.3.1",
@@ -47,8 +48,9 @@
     "build:docker:selfhost": "lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh latest && cd -",
     "build:docker:develop": "node scripts/pinVersions && lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh develop && cd -",
     "build:docker:airgap": "node hosting/scripts/airgapped/airgappedDockerBuild",
+    "build:digitalocean": "cd hosting/digitalocean && ./build.sh && cd -",
     "build:docs": "lerna run build:docs",
-    "release:helm": "./scripts/release_helm_chart.sh",
+    "release:helm": "node scripts/releaseHelmChart",
     "env:multi:enable": "lerna run env:multi:enable",
     "env:multi:disable": "lerna run env:multi:disable",
     "env:selfhost:enable": "lerna run env:selfhost:enable",

View File

@@ -14,7 +14,7 @@
     "cy:setup": "node ./cypress/setup.js",
     "cy:run": "cypress run",
     "cy:open": "cypress open",
-    "cy:run:ci": "cypress run --record --key f308590b-6070-41af-b970-794a3823d451",
+    "cy:run:ci": "cypress run --record",
     "cy:test": "start-server-and-test cy:setup http://localhost:10001/builder cy:run",
     "cy:ci": "start-server-and-test cy:setup http://localhost:10001/builder cy:run",
     "cy:debug": "start-server-and-test cy:setup http://localhost:10001/builder cy:open"

scripts/releaseHelmChart.js (new executable file, 39 lines)
View File

@@ -0,0 +1,39 @@
const yaml = require("js-yaml")
const { execSync } = require("child_process")
const fs = require("fs")
const path = require("path")
const UpgradeTypes = {
  MAJOR: "major",
  MINOR: "minor",
  PATCH: "patch"
}
const CHART_PATH = path.join(__dirname, "../", "hosting", "kubernetes", "budibase", "Chart.yaml")
const UPGRADE_VERSION = process.env.BUDIBASE_RELEASE_VERSION
const UPGRADE_TYPE = process.env.HELM_CHART_UPGRADE_TYPE || UpgradeTypes.PATCH
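// NOTE: only the patch component is bumped below; MAJOR and MINOR are
// declared in UpgradeTypes but not yet handled by this script.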
if (!UPGRADE_VERSION) {
  throw new Error("BUDIBASE_RELEASE_VERSION env var must be set.")
}
try {
  const chartFile = fs.readFileSync(CHART_PATH, "utf-8")
  const chart = yaml.load(chartFile)

  // Upgrade app version in chart to match budibase release version
  chart.appVersion = UPGRADE_VERSION

  // semantically version the chart
  const [major, minor, patch] = chart.version.split(".")
  const newPatch = parseInt(patch) + 1
  chart.version = [major, minor, newPatch].join(".")
  const updatedChartYaml = yaml.dump(chart)
  fs.writeFileSync(CHART_PATH, updatedChartYaml)

  // package the chart and write to docs dir
  execSync(`helm package hosting/kubernetes/budibase --destination docs`)
} catch (err) {
  console.error("Error releasing helm chart")
  throw err
}
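
For illustration (the version number is hypothetical): CI invokes this script
through `yarn release:helm` with BUDIBASE_RELEASE_VERSION exported by the
workflow; an equivalent local run would be roughly:

  BUDIBASE_RELEASE_VERSION=1.0.7 node scripts/releaseHelmChart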

View File

@@ -1,3 +0,0 @@
-cd docs
-helm package ../hosting/kubernetes/budibase
-helm repo index . --url https://budibase.github.io/budibase

View File

@@ -1082,6 +1082,11 @@ argparse@^1.0.7:
   dependencies:
     sprintf-js "~1.0.2"

+argparse@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38"
+  integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
+
 arr-diff@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520"
@@ -3260,6 +3265,13 @@ js-yaml@^3.13.1:
     argparse "^1.0.7"
     esprima "^4.0.0"

+js-yaml@^4.1.0:
+  version "4.1.0"
+  resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602"
+  integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==
+  dependencies:
+    argparse "^2.0.1"
+
 jsbn@~0.1.0:
   version "0.1.1"
   resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"