diff --git a/.github/workflows/ecs_deploy.yml b/.github/workflows/ecs_deploy.yml deleted file mode 100644 index 0d662c9c28..0000000000 --- a/.github/workflows/ecs_deploy.yml +++ /dev/null @@ -1,85 +0,0 @@ -# This workflow will build and push a new container image to Amazon ECR, -# and then will deploy a new task definition to Amazon ECS, when a release is created -# -# To use this workflow, you will need to complete the following set-up steps: -# -# 1. Create an ECR repository to store your images. -# For example: `aws ecr create-repository --repository-name my-ecr-repo --region us-east-2`. -# Replace the value of `ECR_REPOSITORY` in the workflow below with your repository's name. -# Replace the value of `aws-region` in the workflow below with your repository's region. -# -# 2. Create an ECS task definition, an ECS cluster, and an ECS service. -# For example, follow the Getting Started guide on the ECS console: -# https://us-east-2.console.aws.amazon.com/ecs/home?region=us-east-2#/firstRun -# Replace the values for `service` and `cluster` in the workflow below with your service and cluster names. -# -# 3. Store your ECS task definition as a JSON file in your repository. -# The format should follow the output of `aws ecs register-task-definition --generate-cli-skeleton`. -# Replace the value of `task-definition` in the workflow below with your JSON file's name. -# Replace the value of `container-name` in the workflow below with the name of the container -# in the `containerDefinitions` section of the task definition. -# -# 4. Store an IAM user access key in GitHub Actions secrets named `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. -# See the documentation for each action used below for the recommended IAM policies for this IAM user, -# and best practices on handling the access key credentials. - -on: - push: - tags: - - 'v*' - -name: Deploy to Amazon ECS - -jobs: - deploy: - name: deploy - runs-on: ubuntu-16.04 - - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-1 - - - name: Download task definition - run: | - aws ecs describe-task-definition --task-definition ProdAppServerStackprodbudiapplbfargateserviceprodbudiappserverfargatetaskdefinition2EF7F1E7 --query taskDefinition > task-definition.json - - - name: Login to Amazon ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v1 - - - name: Build, tag, and push image to Amazon ECR - id: build-image - env: - ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }} - ECR_REPOSITORY: prod-budi-app-server - IMAGE_TAG: ${{ github.sha }} - run: | - # Build a docker container and - # push it to ECR so that it can - # be deployed to ECS - cd packages/server - docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG . 
- docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG - echo "::set-output name=image::$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" - - name: Fill in the new image ID in the Amazon ECS task definition - id: task-def - uses: aws-actions/amazon-ecs-render-task-definition@v1 - with: - task-definition: task-definition.json - container-name: prod-budi-app-server - image: ${{ steps.build-image.outputs.image }} - - - name: Deploy Amazon ECS task definition - uses: aws-actions/amazon-ecs-deploy-task-definition@v1 - with: - task-definition: ${{ steps.task-def.outputs.task-definition }} - service: prod-budi-app-server-service - cluster: prod-budi-app-server - wait-for-service-stability: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4e76626d2c..588f0c54ae 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -42,6 +42,10 @@ jobs: echo //registry.npmjs.org/:_authToken=${NPM_TOKEN} >> .npmrc yarn release + - name: Get Previous tag + id: previoustag + uses: "WyriHaximus/github-action-get-previous-tag@v1" + - name: Build/release Docker images run: | docker login -u $DOCKER_USER -p $DOCKER_PASSWORD @@ -50,3 +54,18 @@ jobs: env: DOCKER_USER: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }} + BUDIBASE_RELEASE_VERSION: ${{ steps.previoustag.outputs.tag }} + + - uses: azure/setup-helm@v1 + id: install + + # So, we need to inject the values into this + - run: yarn release:helm + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.1.0 + with: + charts_dir: docs + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + \ No newline at end of file diff --git a/.gitignore b/.gitignore index b2a2021cc9..3a5fc5dc7b 100644 --- a/.gitignore +++ b/.gitignore @@ -55,7 +55,7 @@ typings/ .node_repl_history # Output of 'npm pack' -*.tgz +# *.tgz # Yarn Integrity file .yarn-integrity @@ -91,4 +91,4 @@ hosting/.generated-envoy.dev.yaml # Sublime text *.sublime-project -*.sublime-workspace \ No newline at end of file +*.sublime-workspace diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100755 index 0000000000..3b614330e0 --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1,4 @@ +#!/bin/sh +. "$(dirname "$0")/_/husky.sh" + +yarn run lint diff --git a/docs/budibase-0.1.0.tgz b/docs/budibase-0.1.0.tgz new file mode 100644 index 0000000000..7873874ab0 Binary files /dev/null and b/docs/budibase-0.1.0.tgz differ diff --git a/docs/budibase-0.1.1.tgz b/docs/budibase-0.1.1.tgz new file mode 100644 index 0000000000..b38527c4a4 Binary files /dev/null and b/docs/budibase-0.1.1.tgz differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000000..0fa6060f8f --- /dev/null +++ b/docs/index.html @@ -0,0 +1,9 @@ + + + Budibase Helm Chart Repo + + +

+Budibase Charts Repo
+Point Helm at this repo to see charts.
+ + \ No newline at end of file diff --git a/docs/index.yaml b/docs/index.yaml new file mode 100644 index 0000000000..4e064f3dd0 --- /dev/null +++ b/docs/index.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +entries: + budibase: + - apiVersion: v2 + appVersion: 0.9.56 + created: "2021-08-18T18:41:52.640176+01:00" + dependencies: + - condition: services.couchdb.enabled + name: couchdb + repository: https://apache.github.io/couchdb-helm + version: 3.3.4 + - name: ingress-nginx + repository: https://github.com/kubernetes/ingress-nginx + version: 3.35.0 + description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. + digest: 8dc4f2ed4d98cad5adf25936aefea680042d3e4e17832f846b961fd8708ad192 + keywords: + - low-code + - database + - cluster + name: budibase + sources: + - https://github.com/Budibase/budibase + - https://budibase.com + type: application + urls: + - https://budibase.github.io/budibase/budibase-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: 0.9.56 + created: "2021-08-18T18:41:52.635603+01:00" + dependencies: + - condition: services.couchdb.enabled + name: couchdb + repository: https://apache.github.io/couchdb-helm + version: 3.3.4 + - name: ingress-nginx + repository: https://github.com/kubernetes/ingress-nginx + version: 3.35.0 + description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. + digest: 08031b0803cce0eff64472e569d454d9176119c8207aa9873a9c95ee66cc7d3f + keywords: + - low-code + - database + - cluster + name: budibase + sources: + - https://github.com/Budibase/budibase + - https://budibase.com + type: application + urls: + - https://budibase.github.io/budibase/budibase-0.1.0.tgz + version: 0.1.0 +generated: "2021-08-18T18:41:52.629415+01:00" diff --git a/hosting/envoy.dev.yaml.hbs b/hosting/envoy.dev.yaml.hbs index 76417b3e0d..01d5a09efa 100644 --- a/hosting/envoy.dev.yaml.hbs +++ b/hosting/envoy.dev.yaml.hbs @@ -26,10 +26,18 @@ static_resources: cluster: couchdb-service prefix_rewrite: "/" + - match: { prefix: "/api/system/" } + route: + cluster: worker-dev + - match: { prefix: "/api/admin/" } route: cluster: worker-dev + - match: { prefix: "/api/global/" } + route: + cluster: worker-dev + - match: { prefix: "/api/" } route: cluster: server-dev diff --git a/hosting/envoy.yaml b/hosting/envoy.yaml index d7b34f4d5e..d5f9ebee28 100644 --- a/hosting/envoy.yaml +++ b/hosting/envoy.yaml @@ -37,11 +37,19 @@ static_resources: route: cluster: app-service - # special case for worker admin API + # special cases for worker admin (deprecated), global and system API + - match: { prefix: "/api/global/" } + route: + cluster: worker-service + - match: { prefix: "/api/admin/" } route: cluster: worker-service + - match: { prefix: "/api/system/" } + route: + cluster: worker-service + - match: { path: "/" } route: cluster: app-service diff --git a/hosting/kubernetes/budibase/.helmignore b/hosting/kubernetes/budibase/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/hosting/kubernetes/budibase/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/hosting/kubernetes/budibase/Chart.yaml b/hosting/kubernetes/budibase/Chart.yaml new file mode 100644 index 0000000000..b82cb3bab2 --- /dev/null +++ b/hosting/kubernetes/budibase/Chart.yaml @@ -0,0 +1,41 @@ +apiVersion: v2 +name: budibase +description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. +keywords: +- low-code +- database +- cluster +sources: +- https://github.com/Budibase/budibase +- https://budibase.com + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.9.56" + +dependencies: + - name: couchdb + version: 3.3.4 + repository: https://apache.github.io/couchdb-helm + condition: services.couchdb.enabled + - name: ingress-nginx + version: 3.35.0 + repository: https://github.com/kubernetes/ingress-nginx + condition: services.ingress.nginx diff --git a/hosting/kubernetes/budibase/README.md b/hosting/kubernetes/budibase/README.md new file mode 100644 index 0000000000..efa78ba75c --- /dev/null +++ b/hosting/kubernetes/budibase/README.md @@ -0,0 +1,39 @@ +# Budibase + +[Budibase](https://budibase.com/) Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. + +## TL;DR; +```console +$ cd chart +$ helm install budibase . +``` + +## Introduction + +This chart bootstraps a [Budibase](https://budibase.com/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- helm v3 or above +- Kubernetes 1.4+ +- PV provisioner support in the underlying infrastructure (with persistence storage enabled) + +## Installing the Chart + +To install the chart with the release name `budi-release`: + +```console +$ helm install budi-release . +``` + +The command deploys Budibase on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` diff --git a/hosting/kubernetes/budibase/charts/couchdb/Chart.yaml b/hosting/kubernetes/budibase/charts/couchdb/Chart.yaml new file mode 100755 index 0000000000..74ae734a17 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +appVersion: 3.1.0 +description: A database featuring seamless multi-master sync, that scales from big + data to mobile, with an intuitive HTTP/JSON API and designed for reliability. +home: https://couchdb.apache.org/ +icon: http://couchdb.apache.org/CouchDB-visual-identity/logo/CouchDB-couch-symbol.svg +keywords: +- couchdb +- database +- nosql +maintainers: +- email: kocolosk@apache.org + name: kocolosk +- email: willholley@apache.org + name: willholley +name: couchdb +sources: +- https://github.com/apache/couchdb-docker +version: 3.3.4 diff --git a/hosting/kubernetes/budibase/charts/couchdb/README.md b/hosting/kubernetes/budibase/charts/couchdb/README.md new file mode 100755 index 0000000000..3227123d06 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/README.md @@ -0,0 +1,244 @@ +# CouchDB + +Apache CouchDB is a database featuring seamless multi-master sync, that scales +from big data to mobile, with an intuitive HTTP/JSON API and designed for +reliability. + +This chart deploys a CouchDB cluster as a StatefulSet. It creates a ClusterIP +Service in front of the Deployment for load balancing by default, but can also +be configured to deploy other Service types or an Ingress Controller. The +default persistence mechanism is simply the ephemeral local filesystem, but +production deployments should set `persistentVolume.enabled` to `true` to attach +storage volumes to each Pod in the Deployment. + +## TL;DR + +```bash +$ helm repo add couchdb https://apache.github.io/couchdb-helm +$ helm install couchdb/couchdb \ + --set allowAdminParty=true \ + --set couchdbConfig.couchdb.uuid=$(curl https://www.uuidgenerator.net/api/version4 2>/dev/null | tr -d -) +``` + +## Prerequisites + +- Kubernetes 1.9+ with Beta APIs enabled +- Ingress requires Kubernetes 1.14+ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +Add the CouchDB Helm repository: + +```bash +$ helm repo add couchdb https://apache.github.io/couchdb-helm +``` + +Afterwards install the chart replacing the UUID +`decafbaddecafbaddecafbaddecafbad` with a custom one: + +```bash +$ helm install \ + --name my-release \ + --set couchdbConfig.couchdb.uuid=decafbaddecafbaddecafbaddecafbad \ + couchdb/couchdb +``` + +This will create a Secret containing the admin credentials for the cluster. 
+Those credentials can be retrieved as follows: + +```bash +$ kubectl get secret my-release-couchdb -o go-template='{{ .data.adminPassword }}' | base64 --decode +``` + +If you prefer to configure the admin credentials directly you can create a +Secret containing `adminUsername`, `adminPassword` and `cookieAuthSecret` keys: + +```bash +$ kubectl create secret generic my-release-couchdb --from-literal=adminUsername=foo --from-literal=adminPassword=bar --from-literal=cookieAuthSecret=baz +``` + +If you want to set the `adminHash` directly to achieve consistent salts between +different nodes you need to addionally add the key `password.ini` to the secret: + +```bash +$ kubectl create secret generic my-release-couchdb \ + --from-literal=adminUsername=foo \ + --from-literal=cookieAuthSecret=baz \ + --from-file=./my-password.ini +``` + +With the following contents in `my-password.ini`: + +``` +[admins] +foo = +``` + +and then install the chart while overriding the `createAdminSecret` setting: + +```bash +$ helm install \ + --name my-release \ + --set createAdminSecret=false \ + --set couchdbConfig.couchdb.uuid=decafbaddecafbaddecafbaddecafbad \ + couchdb/couchdb +``` + +This Helm chart deploys CouchDB on the Kubernetes cluster in a default +configuration. The [configuration](#configuration) section lists +the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` Deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and +deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v0.2.3 -> v1.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### Upgrade to 3.0.0 + +Since version 3.0.0 setting the CouchDB server instance UUID is mandatory. +Therefore you need to generate a UUID and supply it as a value during the +upgrade as follows: + +```bash +$ helm upgrade \ + --reuse-values \ + --set couchdbConfig.couchdb.uuid= \ + couchdb/couchdb +``` + +## Migrating from stable/couchdb + +This chart replaces the `stable/couchdb` chart previously hosted by Helm and continues the +version semantics. 
You can upgrade directly from `stable/couchdb` to this chart using: + +```bash +$ helm repo add couchdb https://apache.github.io/couchdb-helm +$ helm upgrade my-release couchdb/couchdb +``` + +## Configuration + +The following table lists the most commonly configured parameters of the +CouchDB chart and their default values: + +| Parameter | Description | Default | +|---------------------------------|-------------------------------------------------------|----------------------------------------| +| `clusterSize` | The initial number of nodes in the CouchDB cluster | 3 | +| `couchdbConfig` | Map allowing override elements of server .ini config | *See below* | +| `allowAdminParty` | If enabled, start cluster without admin account | false (requires creating a Secret) | +| `createAdminSecret` | If enabled, create an admin account and cookie secret | true | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `erlangFlags` | Map of flags supplied to the underlying Erlang VM | name: couchdb, setcookie: monster +| `persistentVolume.enabled` | Boolean determining whether to attach a PV to each node | false +| `persistentVolume.size` | If enabled, the size of the persistent volume to attach | 10Gi +| `enableSearch` | Adds a sidecar for Lucene-powered text search | false | + +You can set the values of the `couchdbConfig` map according to the +[official configuration][4]. The following shows the map's default values and +required options to set: + +| Parameter | Description | Default | +|---------------------------------|--------------------------------------------------------------------|----------------------------------------| +| `couchdb.uuid` | UUID for this CouchDB server instance ([Required in a cluster][5]) | | +| `chttpd.bind_address` | listens on all interfaces when set to any | any | +| `chttpd.require_valid_user` | disables all the anonymous requests to the port 5984 when true | false | + +A variety of other parameters are also configurable. 
See the comments in the +`values.yaml` file for further details: + +| Parameter | Default | +|--------------------------------------|----------------------------------------| +| `adminUsername` | admin | +| `adminPassword` | auto-generated | +| `adminHash` | | +| `cookieAuthSecret` | auto-generated | +| `image.repository` | couchdb | +| `image.tag` | 3.1.0 | +| `image.pullPolicy` | IfNotPresent | +| `searchImage.repository` | kocolosk/couchdb-search | +| `searchImage.tag` | 0.1.0 | +| `searchImage.pullPolicy` | IfNotPresent | +| `initImage.repository` | busybox | +| `initImage.tag` | latest | +| `initImage.pullPolicy` | Always | +| `ingress.enabled` | false | +| `ingress.hosts` | chart-example.local | +| `ingress.annotations` | | +| `ingress.path` | / | +| `ingress.tls` | | +| `persistentVolume.accessModes` | ReadWriteOnce | +| `persistentVolume.storageClass` | Default for the Kube cluster | +| `podManagementPolicy` | Parallel | +| `affinity` | | +| `annotations` | | +| `tolerations` | | +| `resources` | | +| `service.annotations` | | +| `service.enabled` | true | +| `service.type` | ClusterIP | +| `service.externalPort` | 5984 | +| `dns.clusterDomainSuffix` | cluster.local | +| `networkPolicy.enabled` | true | +| `serviceAccount.enabled` | true | +| `serviceAccount.create` | true | +| `serviceAccount.imagePullSecrets` | | +| `sidecars` | {} | +| `livenessProbe.enabled` | true | +| `livenessProbe.failureThreshold` | 3 | +| `livenessProbe.initialDelaySeconds` | 0 | +| `livenessProbe.periodSeconds` | 10 | +| `livenessProbe.successThreshold` | 1 | +| `livenessProbe.timeoutSeconds` | 1 | +| `readinessProbe.enabled` | true | +| `readinessProbe.failureThreshold` | 3 | +| `readinessProbe.initialDelaySeconds` | 0 | +| `readinessProbe.periodSeconds` | 10 | +| `readinessProbe.successThreshold` | 1 | +| `readinessProbe.timeoutSeconds` | 1 | + +## Feedback, Issues, Contributing + +General feedback is welcome at our [user][1] or [developer][2] mailing lists. + +Apache CouchDB has a [CONTRIBUTING][3] file with details on how to get started +with issue reporting or contributing to the upkeep of this project. In short, +use GitHub Issues, do not report anything on Docker's website. 
+ +## Non-Apache CouchDB Development Team Contributors + +- [@natarajaya](https://github.com/natarajaya) +- [@satchpx](https://github.com/satchpx) +- [@spanato](https://github.com/spanato) +- [@jpds](https://github.com/jpds) +- [@sebastien-prudhomme](https://github.com/sebastien-prudhomme) +- [@stepanstipl](https://github.com/sebastien-stepanstipl) +- [@amatas](https://github.com/amatas) +- [@Chimney42](https://github.com/Chimney42) +- [@mattjmcnaughton](https://github.com/mattjmcnaughton) +- [@mainephd](https://github.com/mainephd) +- [@AdamDang](https://github.com/AdamDang) +- [@mrtyler](https://github.com/mrtyler) +- [@kevinwlau](https://github.com/kevinwlau) +- [@jeyenzo](https://github.com/jeyenzo) +- [@Pinpin31.](https://github.com/Pinpin31) + +[1]: http://mail-archives.apache.org/mod_mbox/couchdb-user/ +[2]: http://mail-archives.apache.org/mod_mbox/couchdb-dev/ +[3]: https://github.com/apache/couchdb/blob/master/CONTRIBUTING.md +[4]: https://docs.couchdb.org/en/stable/config/index.html +[5]: https://docs.couchdb.org/en/latest/setup/cluster.html#preparing-couchdb-nodes-to-be-joined-into-a-cluster diff --git a/hosting/kubernetes/budibase/charts/couchdb/ci/required-values.yaml b/hosting/kubernetes/budibase/charts/couchdb/ci/required-values.yaml new file mode 100755 index 0000000000..79589d2e04 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/ci/required-values.yaml @@ -0,0 +1,3 @@ +couchdbConfig: + couchdb: + uuid: "decafbaddecafbaddecafbaddecafbad" diff --git a/hosting/kubernetes/budibase/charts/couchdb/ci/sidecar.yaml b/hosting/kubernetes/budibase/charts/couchdb/ci/sidecar.yaml new file mode 100755 index 0000000000..aa570bdf74 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/ci/sidecar.yaml @@ -0,0 +1,9 @@ +sidecars: + - name: foo + image: "busybox" + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: "0.1" + memory: 10Mi + command: ['while true; do echo "foo"; sleep 5; done;'] diff --git a/hosting/kubernetes/budibase/charts/couchdb/password.ini b/hosting/kubernetes/budibase/charts/couchdb/password.ini new file mode 100755 index 0000000000..4ce8445aae --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/password.ini @@ -0,0 +1,2 @@ +[admins] +{{ .Values.adminUsername }} = {{ .Values.adminHash }} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/NOTES.txt b/hosting/kubernetes/budibase/charts/couchdb/templates/NOTES.txt new file mode 100755 index 0000000000..a3658bd37f --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/NOTES.txt @@ -0,0 +1,20 @@ +Apache CouchDB is starting. Check the status of the Pods using: + + kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "couchdb.name" . }},release={{ .Release.Name }}" + +Once all of the Pods are fully Ready, execute the following command to create +some required system databases: + + kubectl exec --namespace {{ .Release.Namespace }} {{ if not .Values.allowAdminParty }}-it {{ end }}{{ template "couchdb.fullname" . }}-0 -c couchdb -- \ + curl -s \ + http://127.0.0.1:5984/_cluster_setup \ + -X POST \ + -H "Content-Type: application/json" \ +{{- if .Values.allowAdminParty }} + -d '{"action": "finish_cluster"}' +{{- else }} + -d '{"action": "finish_cluster"}' \ + -u +{{- end }} + +Then it's time to relax. 
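For illustration only (not part of this patch): a minimal override file for the CouchDB chart above could combine the most commonly configured parameters from its README. This is a sketch; the file name `overrides.yaml` and the enabled persistence are assumptions, and the UUID is the placeholder value used earlier in the README.

```yaml
# overrides.yaml -- example values for the couchdb/couchdb chart (sketch only)
clusterSize: 3                    # initial number of CouchDB nodes
createAdminSecret: true           # auto-generate admin credentials as a Secret
persistentVolume:
  enabled: true                   # attach a PV per pod instead of ephemeral emptyDir storage
  size: 10Gi
couchdbConfig:
  couchdb:
    uuid: decafbaddecafbaddecafbaddecafbad   # required for every install since chart 3.0.0
  chttpd:
    require_valid_user: false     # leave port 5984 open to anonymous requests
```

It would then be applied with something like `helm install my-release couchdb/couchdb -f overrides.yaml`.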
diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/_helpers.tpl b/hosting/kubernetes/budibase/charts/couchdb/templates/_helpers.tpl new file mode 100755 index 0000000000..f9d013e487 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/_helpers.tpl @@ -0,0 +1,81 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "couchdb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "couchdb.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- printf "%s-%s" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +In the event that we create both a headless service and a traditional one, +ensure that the latter gets a unique name. +*/}} +{{- define "couchdb.svcname" -}} +{{- if .Values.fullnameOverride -}} +{{- printf "%s-svc-%s" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-svc-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a random string if the supplied key does not exist +*/}} +{{- define "couchdb.defaultsecret" -}} +{{- if . -}} +{{- . | b64enc | quote -}} +{{- else -}} +{{- randAlphaNum 20 | b64enc | quote -}} +{{- end -}} +{{- end -}} + +{{/* +Labels used to define Pods in the CouchDB statefulset +*/}} +{{- define "couchdb.ss.selector" -}} +app: {{ template "couchdb.name" . }} +release: {{ .Release.Name }} +{{- end -}} + +{{/* +Generates a comma delimited list of nodes in the cluster +*/}} +{{- define "couchdb.seedlist" -}} +{{- $nodeCount := min 5 .Values.clusterSize | int }} + {{- range $index0 := until $nodeCount -}} + {{- $index1 := $index0 | add1 -}} + {{ $.Values.erlangFlags.name }}@{{ template "couchdb.fullname" $ }}-{{ $index0 }}.{{ template "couchdb.fullname" $ }}.{{ $.Release.Namespace }}.svc.{{ $.Values.dns.clusterDomainSuffix }}{{ if ne $index1 $nodeCount }},{{ end }} + {{- end -}} +{{- end -}} + +{{/* +If serviceAccount.name is specified, use that, else use the couchdb instance name +*/}} +{{- define "couchdb.serviceAccount" -}} +{{- if .Values.serviceAccount.name -}} +{{- .Values.serviceAccount.name }} +{{- else -}} +{{- template "couchdb.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Fail if couchdbConfig.couchdb.uuid is undefined +*/}} +{{- define "couchdb.uuid" -}} +{{- required "A value for couchdbConfig.couchdb.uuid must be set" (.Values.couchdbConfig.couchdb | default dict).uuid -}} +{{- end -}} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/configmap.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/configmap.yaml new file mode 100755 index 0000000000..a6a20e0574 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/configmap.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "couchdb.fullname" . }} + labels: + app: {{ template "couchdb.name" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + inifile: | + {{ $couchdbConfig := dict "couchdb" (dict "uuid" (include "couchdb.uuid" .)) -}} + {{- $couchdbConfig := merge $couchdbConfig .Values.couchdbConfig -}} + {{- range $section, $settings := $couchdbConfig -}} + {{ printf "[%s]" $section }} + {{ range $key, $value := $settings -}} + {{ printf "%s = %s" $key ($value | toString) }} + {{ end }} + {{ end }} + + seedlistinifile: | + [cluster] + seedlist = {{ template "couchdb.seedlist" . }} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/headless.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/headless.yaml new file mode 100755 index 0000000000..0ce3ef0f35 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/headless.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "couchdb.fullname" . }} + labels: + app: {{ template "couchdb.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: couchdb + port: 5984 + selector: +{{ include "couchdb.ss.selector" . | indent 4 }} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/ingress.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/ingress.yaml new file mode 100755 index 0000000000..c547847ce5 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/ingress.yaml @@ -0,0 +1,33 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "couchdb.fullname" . -}} +{{- $servicePort := .Values.service.externalPort -}} +{{- $path := .Values.ingress.path | quote -}} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ template "couchdb.fullname" . }} + labels: + app: {{ template "couchdb.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: {{ $path }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/networkpolicy.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/networkpolicy.yaml new file mode 100755 index 0000000000..2830708bef --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/networkpolicy.yaml @@ -0,0 +1,31 @@ + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "couchdb.fullname" . }} + labels: + app: {{ template "couchdb.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: +{{ include "couchdb.ss.selector" . | indent 6 }} + ingress: + - ports: + - protocol: TCP + port: 5984 + - ports: + - protocol: TCP + port: 9100 + - protocol: TCP + port: 4369 + from: + - podSelector: + matchLabels: +{{ include "couchdb.ss.selector" . 
| indent 14 }} + policyTypes: + - Ingress +{{- end }} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/secrets.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/secrets.yaml new file mode 100755 index 0000000000..92f55c6d6b --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/secrets.yaml @@ -0,0 +1,19 @@ +{{- if .Values.createAdminSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "couchdb.fullname" . }} + labels: + app: {{ template "couchdb.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + adminUsername: {{ template "couchdb.defaultsecret" .Values.adminUsername }} + adminPassword: {{ template "couchdb.defaultsecret" .Values.adminPassword }} + cookieAuthSecret: {{ template "couchdb.defaultsecret" .Values.cookieAuthSecret }} +{{- if .Values.adminHash }} + password.ini: {{ tpl (.Files.Get "password.ini") . | b64enc }} +{{- end -}} +{{- end -}} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/service.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/service.yaml new file mode 100755 index 0000000000..6d0382477d --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/service.yaml @@ -0,0 +1,23 @@ +{{- if .Values.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "couchdb.svcname" . }} + labels: + app: {{ template "couchdb.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + ports: + - port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: 5984 + type: {{ .Values.service.type }} + selector: +{{ include "couchdb.ss.selector" . | indent 4 }} +{{- end -}} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/serviceaccount.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/serviceaccount.yaml new file mode 100755 index 0000000000..bb82799a49 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "couchdb.serviceAccount" . }} + labels: + app: {{ template "couchdb.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.serviceAccount.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets }} +{{- end }} +{{- end }} diff --git a/hosting/kubernetes/budibase/charts/couchdb/templates/statefulset.yaml b/hosting/kubernetes/budibase/charts/couchdb/templates/statefulset.yaml new file mode 100755 index 0000000000..6225fbe98c --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/templates/statefulset.yaml @@ -0,0 +1,202 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "couchdb.fullname" . }} + labels: + app: {{ template "couchdb.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.clusterSize }} + serviceName: {{ template "couchdb.fullname" . }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + selector: + matchLabels: +{{ include "couchdb.ss.selector" . 
| indent 6 }} + template: + metadata: + labels: +{{ include "couchdb.ss.selector" . | indent 8 }} +{{- with .Values.annotations }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ template "couchdb.serviceAccount" . }} + {{- end }} + initContainers: + - name: init-copy + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy }} + command: ['sh','-c','cp /tmp/chart.ini /default.d; cp /tmp/seedlist.ini /default.d; ls -lrt /default.d;'] + volumeMounts: + - name: config + mountPath: /tmp/ + - name: config-storage + mountPath: /default.d +{{- if .Values.adminHash }} + - name: admin-hash-copy + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy }} + command: ['sh','-c','cp /tmp/password.ini /local.d/ ;'] + volumeMounts: + - name: admin-password + mountPath: /tmp/password.ini + subPath: "password.ini" + - name: local-config-storage + mountPath: /local.d +{{- end }} + containers: + - name: couchdb + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: couchdb + containerPort: 5984 + - name: epmd + containerPort: 4369 + - containerPort: 9100 + env: +{{- if not .Values.allowAdminParty }} + - name: COUCHDB_USER + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminUsername + - name: COUCHDB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminPassword + - name: COUCHDB_SECRET + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . 
}} + key: cookieAuthSecret +{{- end }} + - name: ERL_FLAGS + value: "{{ range $k, $v := .Values.erlangFlags }} -{{ $k }} {{ $v }} {{ end }}" +{{- if .Values.livenessProbe.enabled }} + livenessProbe: +{{- if .Values.couchdbConfig.chttpd.require_valid_user }} + exec: + command: + - sh + - -c + - curl -G --silent --fail -u ${COUCHDB_USER}:${COUCHDB_PASSWORD} http://localhost:5984/_up +{{- else }} + httpGet: + path: /_up + port: 5984 +{{- end }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} +{{- end }} +{{- if .Values.readinessProbe.enabled }} + readinessProbe: +{{- if .Values.couchdbConfig.chttpd.require_valid_user }} + exec: + command: + - sh + - -c + - curl -G --silent --fail -u ${COUCHDB_USER}:${COUCHDB_PASSWORD} http://localhost:5984/_up +{{- else }} + httpGet: + path: /_up + port: 5984 +{{- end }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + volumeMounts: + - name: config-storage + mountPath: /opt/couchdb/etc/default.d +{{- if .Values.adminHash }} + - name: local-config-storage + mountPath: /opt/couchdb/etc/local.d +{{- end }} + - name: database-storage + mountPath: /opt/couchdb/data +{{- if .Values.enableSearch }} + - name: clouseau + image: "{{ .Values.searchImage.repository }}:{{ .Values.searchImage.tag }}" + imagePullPolicy: {{ .Values.searchImage.pullPolicy }} + volumeMounts: + - name: database-storage + mountPath: /opt/couchdb-search/data +{{- end }} +{{- if .Values.sidecars }} +{{ toYaml .Values.sidecars | indent 8}} +{{- end }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} + volumes: + - name: config-storage + emptyDir: {} + - name: config + configMap: + name: {{ template "couchdb.fullname" . }} + items: + - key: inifile + path: chart.ini + - key: seedlistinifile + path: seedlist.ini + +{{- if .Values.adminHash }} + - name: local-config-storage + emptyDir: {} + - name: admin-password + secret: + secretName: {{ template "couchdb.fullname" . }} +{{- end -}} + +{{- if not .Values.persistentVolume.enabled }} + - name: database-storage + emptyDir: {} +{{- else }} + volumeClaimTemplates: + - metadata: + name: database-storage + labels: + app: {{ template "couchdb.name" . }} + release: {{ .Release.Name }} + spec: + accessModes: + {{- range .Values.persistentVolume.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistentVolume.size | quote }} + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- end }} diff --git a/hosting/kubernetes/budibase/charts/couchdb/values.yaml b/hosting/kubernetes/budibase/charts/couchdb/values.yaml new file mode 100755 index 0000000000..5a5025f816 --- /dev/null +++ b/hosting/kubernetes/budibase/charts/couchdb/values.yaml @@ -0,0 +1,201 @@ +## clusterSize is the initial size of the CouchDB cluster. +clusterSize: 3 + +## If allowAdminParty is enabled the cluster will start up without any database +## administrator account; i.e., all users will be granted administrative +## access. Otherwise, the system will look for a Secret called +## -couchdb containing `adminUsername`, `adminPassword` and +## `cookieAuthSecret` keys. See the `createAdminSecret` flag. +## ref: https://kubernetes.io/docs/concepts/configuration/secret/ +allowAdminParty: false + +## If createAdminSecret is enabled a Secret called -couchdb will +## be created containing auto-generated credentials. Users who prefer to set +## these values themselves have a couple of options: +## +## 1) The `adminUsername`, `adminPassword`, `adminHash`, and `cookieAuthSecret` +## can be defined directly in the chart's values. Note that all of a chart's +## values are currently stored in plaintext in a ConfigMap in the tiller +## namespace. +## +## 2) This flag can be disabled and a Secret with the required keys can be +## created ahead of time. +createAdminSecret: true + +# adminUsername: budibase +# adminPassword: budibase +# adminHash: -pbkdf2-this_is_not_necessarily_secure_either +# cookieAuthSecret: admin + +## When enabled, will deploy a networkpolicy that allows CouchDB pods to +## communicate with each other for clustering and ingress on port 5984 +networkPolicy: + enabled: true + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +# Use a service account +serviceAccount: + enabled: true + create: true +# name: +# imagePullSecrets: +# - name: myimagepullsecret + +## The storage volume used by each Pod in the StatefulSet. If a +## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral +## local storage. Setting the storageClass attribute to "-" disables dynamic +## provisioning of Persistent Volumes; leaving it unset will invoke the default +## provisioner. +persistentVolume: + enabled: false + accessModes: + - ReadWriteOnce + size: 10Gi + storageClass: "" + +## The CouchDB image +image: + repository: couchdb + tag: 3.1.0 + pullPolicy: IfNotPresent + +## Experimental integration with Lucene-powered fulltext search +searchImage: + repository: kocolosk/couchdb-search + tag: 0.2.0 + pullPolicy: IfNotPresent + +## Flip this to flag to include the Search container in each Pod +enableSearch: true + +initImage: + repository: busybox + tag: latest + pullPolicy: Always + +## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter +## problems you can try setting podManagementPolicy to the StatefulSet default +## `OrderedReady` +podManagementPolicy: Parallel + +## To better tolerate Node failures, we can prevent Kubernetes scheduler from +## assigning more than one Pod of CouchDB StatefulSet per Node using podAntiAffinity. 
+affinity: {} + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: "app" + # operator: In + # values: + # - couchdb + # topologyKey: "kubernetes.io/hostname" + +## Optional pod annotations +annotations: {} + +## Optional tolerations +tolerations: [] + +## A StatefulSet requires a headless Service to establish the stable network +## identities of the Pods, and that Service is created automatically by this +## chart without any additional configuration. The Service block below refers +## to a second Service that governs how clients connect to the CouchDB cluster. +service: + # annotations: + enabled: true + type: ClusterIP + externalPort: 5984 + +## An Ingress resource can provide name-based virtual hosting and TLS +## termination among other things for CouchDB deployments which are accessed +## from outside the Kubernetes cluster. +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +ingress: + enabled: false + hosts: + - chart-example.local + path: / + annotations: [] + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +## Optional resource requests and limits for the CouchDB container +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +resources: + {} + # requests: + # cpu: 100m + # memory: 128Mi + # limits: + # cpu: 56 + # memory: 256Gi + +## erlangFlags is a map that is passed to the Erlang VM as flags using the +## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to +## establish connectivity between cluster nodes. +## ref: http://erlang.org/doc/man/erl.html#init_flags +erlangFlags: + name: couchdb + setcookie: monster + +## couchdbConfig will override default CouchDB configuration settings. +## The contents of this map are reformatted into a .ini file laid down +## by a ConfigMap object. +## ref: http://docs.couchdb.org/en/latest/config/index.html +couchdbConfig: + couchdb: + uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance + # cluster: + # q: 8 # Create 8 shards for each database + chttpd: + bind_address: any + # chttpd.require_valid_user disables all the anonymous requests to the port + # 5984 when is set to true. + require_valid_user: false + +# Kubernetes local cluster domain. +# This is used to generate FQDNs for peers when joining the CouchDB cluster. 
+dns: + clusterDomainSuffix: cluster.local + +## Configure liveness and readiness probe values +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 +readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +# Configure arbitrary sidecar containers for CouchDB pods created by the +# StatefulSet +sidecars: {} + # - name: foo + # image: "busybox" + # imagePullPolicy: IfNotPresent + # resources: + # requests: + # cpu: "0.1" + # memory: 10Mi + # command: ['echo "foo";'] + # volumeMounts: + # - name: database-storage + # mountPath: /opt/couchdb/data/ diff --git a/hosting/kubernetes/budibase/charts/ingress-nginx-3.35.0.tgz b/hosting/kubernetes/budibase/charts/ingress-nginx-3.35.0.tgz new file mode 100644 index 0000000000..ee5214c497 Binary files /dev/null and b/hosting/kubernetes/budibase/charts/ingress-nginx-3.35.0.tgz differ diff --git a/hosting/kubernetes/budibase/templates/NOTES.txt b/hosting/kubernetes/budibase/templates/NOTES.txt new file mode 100644 index 0000000000..2ace0aa38d --- /dev/null +++ b/hosting/kubernetes/budibase/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "budibase.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "budibase.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "budibase.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "budibase.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/hosting/kubernetes/budibase/templates/_helpers.tpl b/hosting/kubernetes/budibase/templates/_helpers.tpl new file mode 100644 index 0000000000..3b0853e19f --- /dev/null +++ b/hosting/kubernetes/budibase/templates/_helpers.tpl @@ -0,0 +1,83 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "budibase.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "budibase.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- printf "%s-%s" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +CouchDB secret identifier +*/}} +{{- define "couchdb.fullname" -}} +{{- $name := "couchdb" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Internal DNS +*/}} +{{- define "budibase.serviceDns" -}} +{{- printf "%s.%s.%s" .Release.Namespace "svc" .Values.services.dns -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "budibase.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "budibase.labels" -}} +helm.sh/chart: {{ include "budibase.chart" . }} +{{ include "budibase.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "budibase.selectorLabels" -}} +app.kubernetes.io/name: {{ include "budibase.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "budibase.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "budibase.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a random string if the supplied key does not exist +*/}} +{{- define "budibase.defaultsecret" -}} +{{- if . -}} +{{- . 
| b64enc | quote -}} +{{- else -}} +{{- randAlphaNum 20 | b64enc | quote -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/alb-ingress.yaml b/hosting/kubernetes/budibase/templates/alb-ingress.yaml new file mode 100644 index 0000000000..ea3bd674d5 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/alb-ingress.yaml @@ -0,0 +1,35 @@ +{{- if .Values.ingress.aws }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: ingress-budibase + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + {{- if .Values.ingress.certificateArn }} + alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.certificateArn }} + {{- end }} +spec: + rules: + - http: + paths: + {{- if .Values.ingress.certificateArn }} + - path: / + pathType: Prefix + backend: + service: + name: ssl-redirect + port: + name: use-annotation + {{- end }} + - path: / + pathType: Prefix + backend: + service: + name: proxy-service + port: + number: {{ .Values.services.proxy.port }} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/app-service-deployment.yaml b/hosting/kubernetes/budibase/templates/app-service-deployment.yaml new file mode 100644 index 0000000000..b101ab7854 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/app-service-deployment.yaml @@ -0,0 +1,105 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: app-service + name: app-service +spec: + replicas: {{ .Values.services.apps.replicaCount }} + selector: + matchLabels: + io.kompose.service: app-service + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: app-service + spec: + containers: + - env: + - name: BUDIBASE_ENVIRONMENT + value: {{ .Values.globals.budibaseEnv }} + - name: COUCH_DB_URL + {{ if .Values.services.couchdb.url }} + value: {{ .Values.services.couchdb.url }} + {{ else }} + value: http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }} + {{ end }} + - name: COUCH_DB_USER + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminUsername + - name: COUCH_DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminPassword + - name: ENABLE_ANALYTICS + value: {{ .Values.globals.enableAnalytics | quote }} + - name: INTERNAL_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: internalApiKey + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: jwtSecret + - name: LOG_LEVEL + value: {{ .Values.services.apps.logLevel | default "info" | quote }} + {{ if .Values.services.objectStore.region }} + - name: AWS_REGION + value: {{ .Values.services.objectStore.region }} + {{ end }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . 
}} + key: objectStoreAccess + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreSecret + - name: MINIO_URL + {{ if .Values.services.objectStore.url }} + value: {{ .Values.services.objectStore.url }} + {{ else }} + value: http://minio-service:{{ .Values.services.objectStore.port }} + {{ end }} + - name: PORT + value: {{ .Values.services.apps.port | quote }} + - name: REDIS_PASSWORD + value: {{ .Values.services.redis.password }} + - name: REDIS_URL + {{ if .Values.services.redis.url }} + value: {{ .Values.services.redis.url }} + {{ else }} + value: redis-service:{{ .Values.services.redis.port }} + {{ end }} + - name: SELF_HOSTED + value: {{ .Values.globals.selfHosted | quote }} + - name: SENTRY_DSN + value: {{ .Values.globals.sentryDSN }} + - name: WORKER_URL + value: worker-service:{{ .Values.services.worker.port }} + image: budibase/apps + imagePullPolicy: Always + name: bbapps + ports: + - containerPort: {{ .Values.services.apps.port }} + resources: {} + restartPolicy: Always + serviceAccountName: "" +status: {} diff --git a/hosting/kubernetes/budibase/templates/app-service-service.yaml b/hosting/kubernetes/budibase/templates/app-service-service.yaml new file mode 100644 index 0000000000..5247b4de09 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/app-service-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: app-service + name: app-service +spec: + ports: + - name: {{ .Values.services.apps.port | quote }} + port: {{ .Values.services.apps.port }} + targetPort: {{ .Values.services.apps.port }} + selector: + io.kompose.service: app-service +status: + loadBalancer: {} diff --git a/hosting/kubernetes/budibase/templates/hpa.yaml b/hosting/kubernetes/budibase/templates/hpa.yaml new file mode 100644 index 0000000000..2f901b4664 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }} + labels: + {{- include "budibase.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "budibase.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/hosting/kubernetes/budibase/templates/ingress.yaml b/hosting/kubernetes/budibase/templates/ingress.yaml new file mode 100644 index 0000000000..4de295e18a --- /dev/null +++ b/hosting/kubernetes/budibase/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "budibase.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "budibase.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ .backend.service.name }} + port: + number: {{ .backend.service.port.number }} + {{- else }} + serviceName: {{ .backend.service.name }} + servicePort: {{ .backend.service.port.number }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/hosting/kubernetes/budibase/templates/minio-data-persistentvolumeclaim.yaml b/hosting/kubernetes/budibase/templates/minio-data-persistentvolumeclaim.yaml new file mode 100644 index 0000000000..d122ad0a3e --- /dev/null +++ b/hosting/kubernetes/budibase/templates/minio-data-persistentvolumeclaim.yaml @@ -0,0 +1,16 @@ +{{- if .Values.services.objectStore.minio }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: minio-data + name: minio-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.services.objectStore.storage }} +status: {} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/minio-service-deployment.yaml b/hosting/kubernetes/budibase/templates/minio-service-deployment.yaml new file mode 100644 index 0000000000..a23d0c1d89 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/minio-service-deployment.yaml @@ -0,0 +1,70 @@ +{{- if .Values.services.objectStore.minio }} +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: minio-service + name: minio-service +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: minio-service + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: minio-service + spec: + containers: + - args: + - server + - /data + env: + - name: MINIO_BROWSER + value: {{ .Values.services.objectStore.browser | quote 
}} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreAccess + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreSecret + image: minio/minio + imagePullPolicy: "" + livenessProbe: + exec: + command: + - curl + - -f + - http://localhost:9000/minio/health/live + failureThreshold: 3 + periodSeconds: 30 + timeoutSeconds: 20 + name: minio-service + ports: + - containerPort: {{ .Values.services.objectStore.port }} + resources: {} + volumeMounts: + - mountPath: /data + name: minio-data + restartPolicy: Always + serviceAccountName: "" + volumes: + - name: minio-data + persistentVolumeClaim: + claimName: minio-data +status: {} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/minio-service-service.yaml b/hosting/kubernetes/budibase/templates/minio-service-service.yaml new file mode 100644 index 0000000000..cfdb22002b --- /dev/null +++ b/hosting/kubernetes/budibase/templates/minio-service-service.yaml @@ -0,0 +1,21 @@ +{{- if .Values.services.objectStore.minio }} +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: minio-service + name: minio-service +spec: + ports: + - name: {{ .Values.services.objectStore.port | quote }} + port: {{ .Values.services.objectStore.port }} + targetPort: {{ .Values.services.objectStore.port }} + selector: + io.kompose.service: minio-service +status: + loadBalancer: {} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/proxy-service-deployment.yaml b/hosting/kubernetes/budibase/templates/proxy-service-deployment.yaml new file mode 100644 index 0000000000..0f802da843 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/proxy-service-deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + app.kubernetes.io/name: budibase-proxy + name: proxy-service +spec: + replicas: {{ .Values.services.proxy.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: budibase-proxy + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + app.kubernetes.io/name: budibase-proxy + spec: + containers: + - image: budibase/proxy + imagePullPolicy: "" + name: proxy-service + ports: + - containerPort: {{ .Values.services.proxy.port }} + resources: {} + volumeMounts: + restartPolicy: Always + serviceAccountName: "" + volumes: +status: {} diff --git a/hosting/kubernetes/budibase/templates/proxy-service-service.yaml b/hosting/kubernetes/budibase/templates/proxy-service-service.yaml new file mode 100644 index 0000000000..8f14d97862 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/proxy-service-service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + app.kubernetes.io/name: budibase-proxy + name: proxy-service +spec: + type: NodePort + ports: + - port: {{ .Values.services.proxy.port }} + targetPort: {{ .Values.services.proxy.port }} + protocol: TCP + selector: + app.kubernetes.io/name: budibase-proxy +status: + loadBalancer: {} diff --git 
a/hosting/kubernetes/budibase/templates/redis-data-persistentvolumeclaim.yaml b/hosting/kubernetes/budibase/templates/redis-data-persistentvolumeclaim.yaml new file mode 100644 index 0000000000..2cb5ee8eab --- /dev/null +++ b/hosting/kubernetes/budibase/templates/redis-data-persistentvolumeclaim.yaml @@ -0,0 +1,16 @@ +{{- if .Values.services.redis.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + labels: + io.kompose.service: redis-data + name: redis-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.services.redis.storage }} +status: {} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/redis-service-deployment.yaml b/hosting/kubernetes/budibase/templates/redis-service-deployment.yaml new file mode 100644 index 0000000000..9235b0b11d --- /dev/null +++ b/hosting/kubernetes/budibase/templates/redis-service-deployment.yaml @@ -0,0 +1,49 @@ +{{- if .Values.services.redis.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: redis-service + name: redis-service +spec: + replicas: {{ .Values.services.redis.replicaCount }} + selector: + matchLabels: + io.kompose.service: redis-service + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: redis-service + spec: + containers: + - args: + - redis-server + - --requirepass + - {{ .Values.services.redis.password }} + image: redis + imagePullPolicy: "" + name: redis-service + ports: + - containerPort: {{ .Values.services.redis.port }} + resources: {} + volumeMounts: + - mountPath: /data + name: redis-data + restartPolicy: Always + serviceAccountName: "" + volumes: + - name: redis-data + persistentVolumeClaim: + claimName: redis-data +status: {} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/redis-service-service.yaml b/hosting/kubernetes/budibase/templates/redis-service-service.yaml new file mode 100644 index 0000000000..55ca40ed88 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/redis-service-service.yaml @@ -0,0 +1,21 @@ +{{- if .Values.services.redis.enabled }} +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: redis-service + name: redis-service +spec: + ports: + - name: {{ .Values.services.redis.port | quote }} + port: {{ .Values.services.redis.port }} + targetPort: {{ .Values.services.redis.port }} + selector: + io.kompose.service: redis-service +status: + loadBalancer: {} +{{- end }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/secrets.yaml b/hosting/kubernetes/budibase/templates/secrets.yaml new file mode 100644 index 0000000000..1c0a914ed3 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/secrets.yaml @@ -0,0 +1,17 @@ +{{- if .Values.globals.createSecrets -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "budibase.fullname" . }} + labels: + app: {{ template "budibase.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + internalApiKey: {{ template "budibase.defaultsecret" .Values.globals.internalApiKey }} + jwtSecret: {{ template "budibase.defaultsecret" .Values.globals.jwtSecret }} + objectStoreAccess: {{ template "budibase.defaultsecret" .Values.services.objectStore.accessKey }} + objectStoreSecret: {{ template "budibase.defaultsecret" .Values.services.objectStore.secretKey }} +{{- end -}} diff --git a/hosting/kubernetes/budibase/templates/service.yaml b/hosting/kubernetes/budibase/templates/service.yaml new file mode 100644 index 0000000000..be4d932b59 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "budibase.fullname" . }} + labels: + {{- include "budibase.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "budibase.selectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/hosting/kubernetes/budibase/templates/serviceaccount.yaml b/hosting/kubernetes/budibase/templates/serviceaccount.yaml new file mode 100644 index 0000000000..1aa1020088 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "budibase.serviceAccountName" . }} + labels: + {{- include "budibase.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/hosting/kubernetes/budibase/templates/tests/test-connection.yaml b/hosting/kubernetes/budibase/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..ecd1f361de --- /dev/null +++ b/hosting/kubernetes/budibase/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "budibase.fullname" . }}-test-connection" + labels: + {{- include "budibase.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "budibase.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/hosting/kubernetes/budibase/templates/worker-service-deployment.yaml b/hosting/kubernetes/budibase/templates/worker-service-deployment.yaml new file mode 100644 index 0000000000..703d59c075 --- /dev/null +++ b/hosting/kubernetes/budibase/templates/worker-service-deployment.yaml @@ -0,0 +1,98 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: worker-service + name: worker-service +spec: + replicas: {{ .Values.services.worker.replicaCount }} + + selector: + matchLabels: + io.kompose.service: worker-service + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: worker-service + spec: + containers: + - env: + - name: CLUSTER_PORT + value: {{ .Values.services.worker.port | quote }} + - name: COUCH_DB_USER + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . 
}} + key: adminUsername + - name: COUCH_DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminPassword + - name: COUCH_DB_URL + {{ if .Values.services.couchdb.url }} + value: {{ .Values.services.couchdb.url }} + {{ else }} + value: http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }} + {{ end }} + - name: INTERNAL_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: internalApiKey + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: jwtSecret + {{ if .Values.services.objectStore.region }} + - name: AWS_REGION + value: {{ .Values.services.objectStore.region }} + {{ end }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreAccess + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreSecret + - name: MINIO_URL + {{ if .Values.services.objectStore.url }} + value: {{ .Values.services.objectStore.url }} + {{ else }} + value: http://minio-service:{{ .Values.services.objectStore.port }} + {{ end }} + - name: PORT + value: {{ .Values.services.worker.port | quote }} + - name: REDIS_PASSWORD + value: {{ .Values.services.redis.password | quote }} + - name: REDIS_URL + {{ if .Values.services.redis.url }} + value: {{ .Values.services.redis.url }} + {{ else }} + value: redis-service:{{ .Values.services.redis.port }} + {{ end }} + - name: SELF_HOSTED + value: {{ .Values.globals.selfHosted | quote }} + image: budibase/worker + imagePullPolicy: Always + name: bbworker + ports: + - containerPort: {{ .Values.services.worker.port }} + resources: {} + restartPolicy: Always + serviceAccountName: "" +status: {} diff --git a/hosting/kubernetes/budibase/templates/worker-service-service.yaml b/hosting/kubernetes/budibase/templates/worker-service-service.yaml new file mode 100644 index 0000000000..a79ba1e04b --- /dev/null +++ b/hosting/kubernetes/budibase/templates/worker-service-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + io.kompose.service: worker-service + name: worker-service +spec: + ports: + - name: {{ .Values.services.worker.port | quote }} + port: {{ .Values.services.worker.port }} + targetPort: {{ .Values.services.worker.port }} + selector: + io.kompose.service: worker-service +status: + loadBalancer: {} diff --git a/hosting/kubernetes/budibase/values.yaml b/hosting/kubernetes/budibase/values.yaml new file mode 100644 index 0000000000..30594f95e3 --- /dev/null +++ b/hosting/kubernetes/budibase/values.yaml @@ -0,0 +1,142 @@ +# Default values for budibase. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +# fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 10000 + +ingress: + enabled: true + nginx: true + certificateArn: "" + className: "" + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: # change if using custom domain + paths: + - path: / + pathType: Prefix + backend: + service: + name: proxy-service + port: + number: 10000 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +globals: + budibaseEnv: PRODUCTION + enableAnalytics: false + posthogToken: "" + sentryDSN: "" + logLevel: info + selfHosted: 1 + createSecrets: true # creates an internal API key, JWT secrets and redis password for you + + # if createSecrets is set to false, you can hard-code your secrets here + internalApiKey: "" + jwtSecret: "" + + +services: + dns: cluster.local + + proxy: + port: 10000 + replicaCount: 1 + + apps: + port: 4002 + replicaCount: 1 + logLevel: info + + worker: + port: 4001 + replicaCount: 1 + + couchdb: + enabled: true + replicaCount: 3 + url: "" # only change if pointing to existing couch server + user: "" # only change if pointing to existing couch server + password: "" # only change if pointing to existing couch server + port: 5984 + storage: 100Mi + + redis: + enabled: true # disable if using external redis + port: 6379 + replicaCount: 1 + url: "" # only change if pointing to existing redis cluster and enabled: false + password: "budibase" # recommended to override if using built-in redis + storage: 100Mi + + objectStore: + minio: true + browser: true + port: 9000 + replicaCount: 1 + accessKey: "" # AWS_ACCESS_KEY if using S3 or existing minio access key + secretKey: "" # AWS_SECRET_ACCESS_KEY if using S3 or existing minio secret + region: "" # AWS_REGION if using S3 or existing minio secret + url: "" # only change if pointing to existing minio cluster and minio: false + storage: 100Mi + diff --git a/hosting/kubernetes/envoy/Dockerfile b/hosting/kubernetes/envoy/Dockerfile new file mode 100644 index 0000000000..96334fa723 --- /dev/null +++ b/hosting/kubernetes/envoy/Dockerfile @@ -0,0 +1,4 @@ +FROM envoyproxy/envoy:v1.16-latest +COPY envoy.yaml /etc/envoy/envoy.yaml +RUN chmod go+r /etc/envoy/envoy.yaml + diff --git a/hosting/kubernetes/envoy/envoy.yaml b/hosting/kubernetes/envoy/envoy.yaml new file mode 100644 index 0000000000..0e7859d887 --- /dev/null +++ b/hosting/kubernetes/envoy/envoy.yaml @@ -0,0 +1,125 @@ +static_resources: + listeners: + - name: main_listener + address: + socket_address: { address: 0.0.0.0, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_services + domains: ["*"] + routes: + - match: { prefix: "/app/" } + route: + cluster: app-service + prefix_rewrite: "/" + + - match: { prefix: "/builder/" } + route: + cluster: app-service + + - match: { prefix: "/builder" } + route: + cluster: app-service + + - match: { prefix: "/app_" } + route: + cluster: app-service + + # special case for worker admin API + - match: { prefix: "/api/admin/" } + route: + cluster: worker-service + + - match: { path: "/" } + route: + cluster: app-service + + # special case for when API requests are made, can just forward, not to minio + - match: { prefix: "/api/" } + route: + cluster: app-service + + - match: { prefix: "/worker/" } + route: + cluster: worker-service + prefix_rewrite: "/" + + - match: { prefix: "/db/" } + route: + cluster: couchdb-service + prefix_rewrite: "/" + + # minio is on the default route because this works + # best, minio + AWS SDK doesn't handle path proxy + - match: { prefix: "/" } + route: + cluster: minio-service + + http_filters: + - name: envoy.filters.http.router + + clusters: + - name: app-service + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + load_assignment: + cluster_name: app-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: app-service.budibase.svc.cluster.local + port_value: 4002 + + - name: minio-service + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + load_assignment: + cluster_name: minio-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: minio-service.budibase.svc.cluster.local + port_value: 9000 + + - name: worker-service + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + load_assignment: + cluster_name: worker-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: worker-service.budibase.svc.cluster.local + port_value: 4001 + + - name: couchdb-service + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + load_assignment: + cluster_name: couchdb-service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: couchdb-service.budibase.svc.cluster.local + port_value: 5984 + diff --git a/hosting/scripts/linux/release-to-docker-hub.sh b/hosting/scripts/linux/release-to-docker-hub.sh index b3c380f729..ccb5fa09a0 100755 --- a/hosting/scripts/linux/release-to-docker-hub.sh +++ b/hosting/scripts/linux/release-to-docker-hub.sh @@ -1,12 +1,23 @@ #!/bin/bash tag=$1 -tag=${tag:-latest} +production=$2 -echo "Tagging images with SHA: $GITHUB_SHA and tag: $tag" +if [[ ! "$tag" ]]; then + echo "No tag present. You must pass a tag to this script" + exit 1 +fi + +echo "Tagging images with tag: $tag" docker tag app-service budibase/apps:$tag docker tag worker-service budibase/worker:$tag -docker push budibase/apps:$tag -docker push budibase/worker:$tag +if [[ "$production" ]]; then + echo "Production Deployment. Tagging latest.." + docker tag app-service budibase/apps:latest + docker tag worker-service budibase/worker:latest +fi + +docker push --all-tags budibase/apps +docker push --all-tags budibase/worker diff --git a/i18n/README.de.md b/i18n/README.de.md new file mode 100644 index 0000000000..34a9164c3f --- /dev/null +++ b/i18n/README.de.md @@ -0,0 +1,235 @@ +

+ + Budibase + +

+

+ Budibase +

+ +

+ Entwickle, automatisiere und stelle interne Tools in Minuten bereit. +

+

+ Budibase ist eine quelloffene Low-Code-Plattform, die es Entwicklern und IT-Profis ermöglicht, interne Tools auf eigener Infrastruktur zu entwickeln, zu automatisieren und bereitzustellen. +

+ +

+ 🤖 🎨 🚀 +

+ +

+ Budibase design ui +

+ +

+ + GitHub all releases + + + GitHub release (latest by date) + + + Follow @budibase + + Code of conduct + + + +

+ +

+ Los Geht's + · + Dokumentation + · + Featureanfrage + · + Einen Bug melden + · + Support: Github Discussions +

+ +

+## ✨ Features + +- **Entwickle echte Webanwendungen.** Anders als ähnliche Plattformen entwickelst du mit Budibase echte Single-Page Webapplikationen (SPAs). Deine Budibase-Apps sind standardmäßig hochperformant und haben ein Responsive-Design für eine großartige Benutzererfahrung. + +- **Quelloffen und erweiterbar.** Budibase ist quelloffen - lizenziert unter der GPL v3. Du kannst darauf vertrauen, dass Budibase auch in der Zukunft immer zur Verfügung steht. Budibase bietet eine Entwicklerfreundliche Plattform: du kannst Budibase erweitern, oder die Codebase forken und eigene Änderungen vornehmen. + +- **Datenquellen einbinden oder von Null starten.** Budibase kann Daten aus vielen Quellen einbinden, unter anderem aus MongoDB, CouchDB, PostgreSQL, MySQL, Airtable, S3, DynamoDB, oder einer REST API. Und anders als ähnliche Plattformen erlaubt Budibase auch die App-Entwicklung komplett ohne Datenquellen mit einer internen Datenbank. Deine Datenquelle noch nicht dabei? [Frag einfach nach](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). + +- **Designe und entwickle Apps mit leistungsfähigen Komponenten.** Budibase kommt fertig mit optisch ansprechenden und leistungsfähigen Komponenten, die als Bausteine für deine UI dienen. Außerdem kannst du die UI mit vielen CSS-Styles nach deinem Geschmack anpassen. Fehlt dir eine Komponente? [Frag uns hier](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). + +- **Automatisiere Prozesse, integriere andere Tools und binde Web-APIs ein.** Spar dir Zeit, indem du manuelle Prozesse einfach automatisierst: Vom Verbinden mit Web-Hooks bis zum automatischen Senden von E-Mails, Budibase kann alles für dich erledigen. Eine Automatisierung ist noch nicht dabei? Du kannst einfach [deine eigene erstellen](https://github.com/Budibase/automations) oder [uns deine Idee mitteilen](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). + +- **Ein Paradies für Systemadministratoren** Budibase ist von Grund auf für das Skalieren ausgelegt. Du kannst Budibase einfach auf deiner eigenen Infrastruktur hosten und global Benutzer, Onboarding, SMTP, Applikationen, Gruppen, UI-Themes und mehr verwalten. Du kannst außerdem ein übersichtliches App-Portal für deine Benutzer bereitstellen und das Benutzermanagement an Gruppen-Manager delegieren. + +
+ +--- + +
+ +## 🏁 Los geht's +Momentan existieren zwei Optionen mit Budibase loszulegen: Digital Ocean und Docker. +

+ +### Los geht's mit Digital Ocean +Der einfachste und schnellste Weg loszulegen ist Digital Ocean: +1-Click Deploy auf Digital Ocean + + + digital ocean badge + +

+ +### Los geht's mit Docker +Um loszulegen, musst du bereits `docker` und `docker compose` auf deinem Computer installiert haben. +Sobald du Docker installiert hast, brauchst du ca. 5 Minuten für diese 4 Schritte: + +1. Installiere das Budibase CLI-Tool. +``` +$ npm i -g @budibase/cli +``` + + +2. Installiere Budibase (wähle den Speicherort und den Port, auf dem Budibase laufen soll). +``` +$ budi hosting --init +``` + + +3. Führe Budibase aus. +``` +$ budi hosting --start +``` + + +4. Lege einen Admin-Benutzer an. +Gib die E-Mail und das Passwort für den neuen Admin-Benutzer ein. + +Schon geschafft! Jetzt kann es losgehen mit der minutenschnellen Entwicklung deiner Tools. Für weitere Informationen und Tipps schau doch mal in unsere [Dokumentation](https://docs.budibase.com/getting-started). + +
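Als kleine, rein beispielhafte Skizze (Annahme: Standard-Setup über Docker; die Container-Namen können bei dir abweichen) kannst du nach Schritt 3 mit gewöhnlichen Docker-Befehlen prüfen, ob die Budibase-Dienste laufen:

```
# Laufende Container anzeigen; die Budibase-Dienste sollten hier erscheinen
$ docker ps

# Logs eines einzelnen Containers ansehen, falls ein Dienst nicht startet
$ docker logs <container-name>
```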
+ +--- + +
+ +## 🎓 Budibase lernen + +Die Budibase Dokumentation [findest du hier](https://docs.budibase.com). +
+ +--- + +

+ +## 💬 Community + +Wenn du eine Frage hast, oder dich mit anderen Budibase-Nutzern unterhalten willst, schau doch mal in unsere +[Github Discussions](https://github.com/Budibase/budibase/discussions). + + + +

+ +--- + +
+ +## ❗ Verhaltenskodex + +Budibase steht für eine einladende und vielfältige Community frei von Belästigung. Wir erwarten, dass sich jeder in der Budibase-Community an unseren [**Verhaltenskodex**](https://github.com/Budibase/budibase/blob/HEAD/.github/CODE_OF_CONDUCT.md) hält. Bitte lies ihn dir durch. +
+ +--- + +
+ +## 🙌 Zu Budibase beitragen + +Von einem gemeldeten Bug bis zum Erstellen eines Pull-Requests: Wir schätzen jeden Beitrag. Wenn du ein neues Feature implementieren oder eine Änderung an der API vornehmen willst, erstelle bitte zuerst ein Issue. So können wir sicherstellen, dass deine Arbeit nicht umsonst ist. + +### Unsicher, wo du anfangen sollst? +Gute Ideen für erste Beiträge zum Projekt [findest du hier](https://github.com/Budibase/budibase/projects/22). + +### Wie das Repository strukturiert ist +Budibase ist ein Monorepo, das von Lerna verwaltet wird. Lerna übernimmt das Erstellen und Veröffentlichen der Budibase-Pakete. +Grob besteht Budibase aus folgenden Modulen: + +- [packages/builder](https://github.com/Budibase/budibase/tree/HEAD/packages/builder) - enthält den Code für den clientseitigen Budibase Builder, mit dem Anwendungen erstellt werden. + +- [packages/client](https://github.com/Budibase/budibase/tree/HEAD/packages/client) - ein Modul, das im Browser läuft und aus JSON-Definitionen funktionsfähige Web-Apps erstellt. + +- [packages/server](https://github.com/Budibase/budibase/tree/HEAD/packages/server) - der Budibase Server. Diese Koa-Anwendung stellt den JavaScript-Code für Builder und Client bereit und bietet eine API für die Interaktion mit dem Budibase-Backend, Datenbanken und dem Dateisystem. + +Für mehr Informationen schau in die [CONTRIBUTING.md](https://github.com/Budibase/budibase/blob/HEAD/.github/CONTRIBUTING.md). +
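Eine kurze, beispielhafte Skizze des typischen lokalen Arbeitsablaufs in der Monorepo, abgeleitet aus den Scripts in der package.json (Details können je nach Version abweichen):

```
# Abhängigkeiten installieren und die Lerna-Pakete miteinander verlinken
$ yarn && yarn bootstrap

# Alle Pakete der Monorepo bauen
$ yarn build
```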

+ +--- + +

+ +## 📝 Lizenz + +Budibase ist quelloffen, lizenziert unter der [GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html). Die Client- und Komponentenbibliotheken sind unter der [MPL](https://directory.fsf.org/wiki/License:MPL-2.0) lizenziert, damit du deine erstellten Apps unter deine präferierte Lizenz stellen kannst. +

+ +--- + + +
+ +## ⭐ Github-Sterne im Verlauf der Zeit + +[![Stargazers over time](https://starchart.cc/Budibase/budibase.svg)](https://starchart.cc/Budibase/budibase) + +Wenn bei dir zwischen Updates des Builders Probleme auftreten, lies bitte den Guide [hier](https://github.com/Budibase/budibase/blob/HEAD/.github/CONTRIBUTING.md#troubleshooting), um deine Umgebung zurückzusetzen. + +
+ +--- + +

+ +## Mitwirkende ✨ + +Vielen Dank an alle wundervollen Menschen, die zu Budibase beigetragen haben ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Mitwirkende(r) | Beiträge |
| --- | --- |
| Martin McKeaveney | 💻 📖 ⚠️ 🚇 |
| Michael Drury | 📖 💻 ⚠️ 🚇 |
| Andrew Kingston | 📖 💻 ⚠️ 🎨 |
| Michael Shanks | 📖 💻 ⚠️ |
| Kevin Åberg Kultalahti | 📖 💻 ⚠️ |
| Joe | 📖 💻 🖋 🎨 |
| Rory Powell | 💻 📖 ⚠️ |
| Peter Clement | 💻 📖 ⚠️ |
| Conor_Mack | 💻 ⚠️ |
| pngwn | 💻 ⚠️ |
| HugoLd | 💻 |
| victoriasloan | 💻 |
| yashank09 | 💻 |
| SOVLOOKUP | 💻 |
| seoulaja | 🌍 |
| Maurits Lourens | ⚠️ 💻 |
| Rory Powell | 🚇 ⚠️ 💻 |
+ + + + + + +Dieses Projekt folgt der [All-Contributors](https://github.com/all-contributors/all-contributors) Spezifikation. Wir heißen Beiträge aller Art willkommen! diff --git a/lerna.json b/lerna.json index b00fa15ba4..eff6f67e6f 100644 --- a/lerna.json +++ b/lerna.json @@ -1,5 +1,5 @@ { - "version": "0.9.115", + "version": "0.9.116-alpha.0", "npmClient": "yarn", "packages": [ "packages/*" diff --git a/package.json b/package.json index 4f545d935f..05c69e54dc 100644 --- a/package.json +++ b/package.json @@ -7,6 +7,7 @@ "eslint": "^7.28.0", "eslint-plugin-cypress": "^2.11.3", "eslint-plugin-svelte3": "^3.2.0", + "husky": "^7.0.1", "kill-port": "^1.6.1", "lerna": "3.14.1", "prettier": "^2.3.1", @@ -16,7 +17,7 @@ "svelte": "^3.38.2" }, "scripts": { - "setup": "./hosting/scripts/setup.js && yarn && yarn bootstrap && yarn build && yarn dev", + "setup": "node ./hosting/scripts/setup.js && yarn && yarn bootstrap && yarn build && yarn dev", "bootstrap": "lerna link && lerna bootstrap", "build": "lerna run build", "initialise": "lerna run initialise", @@ -42,7 +43,11 @@ "lint:fix": "yarn run lint:fix:ts && yarn run lint:fix:prettier && yarn run lint:fix:eslint", "test:e2e": "lerna run cy:test", "test:e2e:ci": "lerna run cy:ci", - "build:docker": "lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh && cd -", - "build:docker:develop": "lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh develop && cd -" + "build:docker": "lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh $BUDIBASE_RELEASE_VERSION release && cd -", + "build:docker:develop": "node scripts/pinVersions && lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh develop && cd -", + "release:helm": "./scripts/release_helm_chart.sh", + "multi:enable": "lerna run multi:enable", + "multi:disable": "lerna run multi:disable", + "postinstall": "husky install" } } diff --git a/packages/auth/db.js b/packages/auth/db.js index 4b03ec36cc..a7b38821a7 100644 --- a/packages/auth/db.js +++ b/packages/auth/db.js @@ -1 +1,4 @@ -module.exports = require("./src/db/utils") +module.exports = { + ...require("./src/db/utils"), + ...require("./src/db/constants"), +} diff --git a/packages/auth/package.json b/packages/auth/package.json index fa56e927f1..beb1cf751c 100644 --- a/packages/auth/package.json +++ b/packages/auth/package.json @@ -1,6 +1,6 @@ { "name": "@budibase/auth", - "version": "0.9.115", + "version": "0.9.116-alpha.0", "description": "Authentication middlewares for budibase builder and apps", "main": "src/index.js", "author": "Budibase", @@ -13,6 +13,7 @@ "@techpass/passport-openidconnect": "^0.3.0", "aws-sdk": "^2.901.0", "bcryptjs": "^2.4.3", + "cls-hooked": "^4.2.2", "ioredis": "^4.27.1", "jsonwebtoken": "^8.5.1", "koa-passport": "^4.1.4", diff --git a/packages/auth/src/cache/user.js b/packages/auth/src/cache/user.js index 46202cbfe9..4a19da489f 100644 --- a/packages/auth/src/cache/user.js +++ b/packages/auth/src/cache/user.js @@ -1,15 +1,21 @@ -const { getDB } = require("../db") -const { StaticDatabases } = require("../db/utils") const redis = require("../redis/authRedis") +const { getTenantId, lookupTenantId, getGlobalDB } = require("../tenancy") const EXPIRY_SECONDS = 3600 -exports.getUser = async userId => { +exports.getUser = async (userId, tenantId = null) => { + if (!tenantId) { + try { + tenantId = getTenantId() + } catch (err) { + tenantId = await lookupTenantId(userId) + } + } const client = await redis.getUserClient() 
// try cache let user = await client.get(userId) if (!user) { - user = await getDB(StaticDatabases.GLOBAL.name).get(userId) + user = await getGlobalDB(tenantId).get(userId) client.store(userId, user, EXPIRY_SECONDS) } return user diff --git a/packages/auth/src/constants.js b/packages/auth/src/constants.js index c8cc34e937..4b4aef5a42 100644 --- a/packages/auth/src/constants.js +++ b/packages/auth/src/constants.js @@ -14,13 +14,14 @@ exports.Headers = { API_VER: "x-budibase-api-version", APP_ID: "x-budibase-app-id", TYPE: "x-budibase-type", + TENANT_ID: "x-budibase-tenant-id", } exports.GlobalRoles = { OWNER: "owner", ADMIN: "admin", BUILDER: "builder", - GROUP_MANAGER: "group_manager", + WORKSPACE_MANAGER: "workspace_manager", } exports.Configs = { @@ -31,3 +32,5 @@ exports.Configs = { OIDC: "oidc", OIDC_LOGOS: "logos_oidc", } + +exports.DEFAULT_TENANT_ID = "default" diff --git a/packages/auth/src/db/constants.js b/packages/auth/src/db/constants.js new file mode 100644 index 0000000000..77643ce4c5 --- /dev/null +++ b/packages/auth/src/db/constants.js @@ -0,0 +1,17 @@ +exports.SEPARATOR = "_" + +exports.StaticDatabases = { + GLOBAL: { + name: "global-db", + docs: { + apiKeys: "apikeys", + }, + }, + // contains information about tenancy and so on + PLATFORM_INFO: { + name: "global-info", + docs: { + tenants: "tenants", + }, + }, +} diff --git a/packages/auth/src/db/utils.js b/packages/auth/src/db/utils.js index 1fc78f4182..4cd29c9bc8 100644 --- a/packages/auth/src/db/utils.js +++ b/packages/auth/src/db/utils.js @@ -1,39 +1,38 @@ const { newid } = require("../hashing") const Replication = require("./Replication") +const { DEFAULT_TENANT_ID } = require("../constants") const env = require("../environment") +const { StaticDatabases, SEPARATOR } = require("./constants") +const { getTenantId } = require("../tenancy") const fetch = require("node-fetch") const { getCouch } = require("./index") const UNICODE_MAX = "\ufff0" -const SEPARATOR = "_" exports.ViewNames = { USER_BY_EMAIL: "by_email", } -exports.StaticDatabases = { - GLOBAL: { - name: "global-db", - }, - DEPLOYMENTS: { - name: "deployments", - }, -} +exports.StaticDatabases = StaticDatabases + +const PRE_APP = "app" +const PRE_DEV = "dev" const DocumentTypes = { USER: "us", - GROUP: "group", + WORKSPACE: "workspace", CONFIG: "config", TEMPLATE: "template", - APP: "app", - APP_DEV: "app_dev", - APP_METADATA: "app_metadata", + APP: PRE_APP, + DEV: PRE_DEV, + APP_DEV: `${PRE_APP}${SEPARATOR}${PRE_DEV}`, + APP_METADATA: `${PRE_APP}${SEPARATOR}metadata`, ROLE: "role", } exports.DocumentTypes = DocumentTypes exports.APP_PREFIX = DocumentTypes.APP + SEPARATOR -exports.APP_DEV_PREFIX = DocumentTypes.APP_DEV + SEPARATOR +exports.APP_DEV = exports.APP_DEV_PREFIX = DocumentTypes.APP_DEV + SEPARATOR exports.SEPARATOR = SEPARATOR function isDevApp(app) { @@ -64,21 +63,21 @@ function getDocParams(docType, docId = null, otherProps = {}) { } /** - * Generates a new group ID. - * @returns {string} The new group ID which the group doc can be stored under. + * Generates a new workspace ID. + * @returns {string} The new workspace ID which the workspace doc can be stored under. */ -exports.generateGroupID = () => { - return `${DocumentTypes.GROUP}${SEPARATOR}${newid()}` +exports.generateWorkspaceID = () => { + return `${DocumentTypes.WORKSPACE}${SEPARATOR}${newid()}` } /** - * Gets parameters for retrieving groups. + * Gets parameters for retrieving workspaces. 
*/ -exports.getGroupParams = (id = "", otherProps = {}) => { +exports.getWorkspaceParams = (id = "", otherProps = {}) => { return { ...otherProps, - startkey: `${DocumentTypes.GROUP}${SEPARATOR}${id}`, - endkey: `${DocumentTypes.GROUP}${SEPARATOR}${id}${UNICODE_MAX}`, + startkey: `${DocumentTypes.WORKSPACE}${SEPARATOR}${id}`, + endkey: `${DocumentTypes.WORKSPACE}${SEPARATOR}${id}${UNICODE_MAX}`, } } @@ -106,14 +105,14 @@ exports.getGlobalUserParams = (globalId, otherProps = {}) => { /** * Generates a template ID. - * @param ownerId The owner/user of the template, this could be global or a group level. + * @param ownerId The owner/user of the template, this could be global or a workspace level. */ exports.generateTemplateID = ownerId => { return `${DocumentTypes.TEMPLATE}${SEPARATOR}${ownerId}${SEPARATOR}${newid()}` } /** - * Gets parameters for retrieving templates. Owner ID must be specified, either global or a group level. + * Gets parameters for retrieving templates. Owner ID must be specified, either global or a workspace level. */ exports.getTemplateParams = (ownerId, templateId, otherProps = {}) => { if (!templateId) { @@ -159,6 +158,25 @@ exports.getDeployedAppID = appId => { return appId } +exports.getCouchUrl = () => { + if (!env.COUCH_DB_URL) return + + // username and password already exist in URL + if (env.COUCH_DB_URL.includes("@")) { + return env.COUCH_DB_URL + } + + const [protocol, ...rest] = env.COUCH_DB_URL.split("://") + + if (!env.COUCH_DB_USERNAME || !env.COUCH_DB_PASSWORD) { + throw new Error( + "CouchDB configuration invalid. You must provide a fully qualified CouchDB url, or the COUCH_DB_USER and COUCH_DB_PASSWORD environment variables." + ) + } + + return `${protocol}://${env.COUCH_DB_USERNAME}:${env.COUCH_DB_PASSWORD}@${rest}` +} + /** * if in production this will use the CouchDB _all_dbs call to retrieve a list of databases. If testing * when using Pouch it will use the pouchdb-all-dbs package. @@ -168,7 +186,7 @@ exports.getAllDbs = async () => { if (env.isTest()) { return getCouch().allDbs() } - const response = await fetch(`${env.COUCH_DB_URL}/_all_dbs`) + const response = await fetch(`${exports.getCouchUrl()}/_all_dbs`) if (response.status === 200) { return response.json() } else { @@ -183,9 +201,29 @@ exports.getAllDbs = async () => { * different users/companies apps as there is no security around it - all apps are returned. * @return {Promise} returns the app information document stored in each app database. 
*/ -exports.getAllApps = async ({ CouchDB, dev, all } = {}) => { +exports.getAllApps = async (CouchDB, { dev, all, idsOnly } = {}) => { + let tenantId = getTenantId() + if (!env.MULTI_TENANCY && !tenantId) { + tenantId = DEFAULT_TENANT_ID + } let dbs = await exports.getAllDbs() - const appDbNames = dbs.filter(dbName => dbName.startsWith(exports.APP_PREFIX)) + const appDbNames = dbs.filter(dbName => { + const split = dbName.split(SEPARATOR) + // it is an app, check the tenantId + if (split[0] === DocumentTypes.APP) { + const noTenantId = split.length === 2 || split[1] === DocumentTypes.DEV + // tenantId is always right before the UUID + const possibleTenantId = split[split.length - 2] + return ( + (tenantId === DEFAULT_TENANT_ID && noTenantId) || + possibleTenantId === tenantId + ) + } + return false + }) + if (idsOnly) { + return appDbNames + } const appPromises = appDbNames.map(db => // skip setup otherwise databases could be re-created new CouchDB(db, { skip_setup: true }).get(DocumentTypes.APP_METADATA) @@ -232,8 +270,8 @@ exports.dbExists = async (CouchDB, dbName) => { * Generates a new configuration ID. * @returns {string} The new configuration ID which the config doc can be stored under. */ -const generateConfigID = ({ type, group, user }) => { - const scope = [type, group, user].filter(Boolean).join(SEPARATOR) +const generateConfigID = ({ type, workspace, user }) => { + const scope = [type, workspace, user].filter(Boolean).join(SEPARATOR) return `${DocumentTypes.CONFIG}${SEPARATOR}${scope}` } @@ -241,8 +279,8 @@ const generateConfigID = ({ type, group, user }) => { /** * Gets parameters for retrieving configurations. */ -const getConfigParams = ({ type, group, user }, otherProps = {}) => { - const scope = [type, group, user].filter(Boolean).join(SEPARATOR) +const getConfigParams = ({ type, workspace, user }, otherProps = {}) => { + const scope = [type, workspace, user].filter(Boolean).join(SEPARATOR) return { ...otherProps, @@ -252,15 +290,15 @@ const getConfigParams = ({ type, group, user }, otherProps = {}) => { } /** - * Returns the most granular configuration document from the DB based on the type, group and userID passed. + * Returns the most granular configuration document from the DB based on the type, workspace and userID passed. * @param {Object} db - db instance to query - * @param {Object} scopes - the type, group and userID scopes of the configuration. + * @param {Object} scopes - the type, workspace and userID scopes of the configuration. * @returns The most granular configuration document based on the scope. 
*/ -const getScopedFullConfig = async function (db, { type, user, group }) { +const getScopedFullConfig = async function (db, { type, user, workspace }) { const response = await db.allDocs( getConfigParams( - { type, user, group }, + { type, user, workspace }, { include_docs: true, } @@ -270,14 +308,14 @@ const getScopedFullConfig = async function (db, { type, user, group }) { function determineScore(row) { const config = row.doc - // Config is specific to a user and a group - if (config._id.includes(generateConfigID({ type, user, group }))) { + // Config is specific to a user and a workspace + if (config._id.includes(generateConfigID({ type, user, workspace }))) { return 4 } else if (config._id.includes(generateConfigID({ type, user }))) { // Config is specific to a user only return 3 - } else if (config._id.includes(generateConfigID({ type, group }))) { - // Config is specific to a group only + } else if (config._id.includes(generateConfigID({ type, workspace }))) { + // Config is specific to a workspace only return 2 } else if (config._id.includes(generateConfigID({ type }))) { // Config is specific to a type only diff --git a/packages/auth/src/db/views.js b/packages/auth/src/db/views.js index 1f1f28b917..1b48786e24 100644 --- a/packages/auth/src/db/views.js +++ b/packages/auth/src/db/views.js @@ -1,5 +1,4 @@ -const { DocumentTypes, ViewNames, StaticDatabases } = require("./utils") -const { getDB } = require("./index") +const { DocumentTypes, ViewNames } = require("./utils") function DesignDoc() { return { @@ -10,8 +9,7 @@ function DesignDoc() { } } -exports.createUserEmailView = async () => { - const db = getDB(StaticDatabases.GLOBAL.name) +exports.createUserEmailView = async db => { let designDoc try { designDoc = await db.get("_design/database") diff --git a/packages/auth/src/environment.js b/packages/auth/src/environment.js index 355843d02d..4d1453837c 100644 --- a/packages/auth/src/environment.js +++ b/packages/auth/src/environment.js @@ -9,6 +9,8 @@ function isTest() { module.exports = { JWT_SECRET: process.env.JWT_SECRET, COUCH_DB_URL: process.env.COUCH_DB_URL, + COUCH_DB_USERNAME: process.env.COUCH_DB_USER, + COUCH_DB_PASSWORD: process.env.COUCH_DB_PASSWORD, SALT_ROUNDS: process.env.SALT_ROUNDS, REDIS_URL: process.env.REDIS_URL, REDIS_PASSWORD: process.env.REDIS_PASSWORD, @@ -16,6 +18,7 @@ module.exports = { MINIO_SECRET_KEY: process.env.MINIO_SECRET_KEY, MINIO_URL: process.env.MINIO_URL, INTERNAL_API_KEY: process.env.INTERNAL_API_KEY, + MULTI_TENANCY: process.env.MULTI_TENANCY, isTest, _set(key, value) { process.env[key] = value diff --git a/packages/auth/src/index.js b/packages/auth/src/index.js index 98c558706a..5421dea214 100644 --- a/packages/auth/src/index.js +++ b/packages/auth/src/index.js @@ -2,6 +2,7 @@ const passport = require("koa-passport") const LocalStrategy = require("passport-local").Strategy const JwtStrategy = require("passport-jwt").Strategy const { StaticDatabases } = require("./db/utils") +const { getGlobalDB } = require("./tenancy") const { jwt, local, @@ -9,8 +10,9 @@ const { google, oidc, auditLog, + tenancy, } = require("./middleware") -const { setDB, getDB } = require("./db") +const { setDB } = require("./db") const userCache = require("./cache/user") // Strategies @@ -20,7 +22,7 @@ passport.use(new JwtStrategy(jwt.options, jwt.authenticate)) passport.serializeUser((user, done) => done(null, user)) passport.deserializeUser(async (user, done) => { - const db = getDB(StaticDatabases.GLOBAL.name) + const db = getGlobalDB() try { const user = await 
db.get(user._id) @@ -54,6 +56,7 @@ module.exports = { google, oidc, jwt: require("jsonwebtoken"), + buildTenancyMiddleware: tenancy, auditLog, }, cache: { diff --git a/packages/auth/src/middleware/authenticated.js b/packages/auth/src/middleware/authenticated.js index b58e1917fd..e3705a9a24 100644 --- a/packages/auth/src/middleware/authenticated.js +++ b/packages/auth/src/middleware/authenticated.js @@ -2,46 +2,34 @@ const { Cookies, Headers } = require("../constants") const { getCookie, clearCookie } = require("../utils") const { getUser } = require("../cache/user") const { getSession, updateSessionTTL } = require("../security/sessions") +const { buildMatcherRegex, matches } = require("./matchers") const env = require("../environment") -const PARAM_REGEX = /\/:(.*?)\//g - -function buildNoAuthRegex(patterns) { - return patterns.map(pattern => { - const isObj = typeof pattern === "object" && pattern.route - const method = isObj ? pattern.method : "GET" - let route = isObj ? pattern.route : pattern - - const matches = route.match(PARAM_REGEX) - if (matches) { - for (let match of matches) { - route = route.replace(match, "/.*/") - } - } - return { regex: new RegExp(route), method } - }) -} - -function finalise(ctx, { authenticated, user, internal, version } = {}) { +function finalise( + ctx, + { authenticated, user, internal, version, publicEndpoint } = {} +) { + ctx.publicEndpoint = publicEndpoint || false ctx.isAuthenticated = authenticated || false ctx.user = user ctx.internal = internal || false ctx.version = version } -module.exports = (noAuthPatterns = [], opts) => { - const noAuthOptions = noAuthPatterns ? buildNoAuthRegex(noAuthPatterns) : [] +/** + * This middleware is tenancy aware, so that it does not depend on other middlewares being used. + * The tenancy modules should not be used here and it should be assumed that the tenancy context + * has not yet been populated. + */ +module.exports = (noAuthPatterns = [], opts = { publicAllowed: false }) => { + const noAuthOptions = noAuthPatterns ? 
buildMatcherRegex(noAuthPatterns) : [] return async (ctx, next) => { + let publicEndpoint = false const version = ctx.request.headers[Headers.API_VER] // the path is not authenticated - const found = noAuthOptions.find(({ regex, method }) => { - return ( - regex.test(ctx.request.url) && - ctx.request.method.toLowerCase() === method.toLowerCase() - ) - }) - if (found != null) { - return next() + const found = matches(ctx, noAuthOptions) + if (found) { + publicEndpoint = true } try { // check the actual user is authenticated first @@ -58,7 +46,7 @@ module.exports = (noAuthPatterns = [], opts) => { error = "No session found" } else { try { - user = await getUser(userId) + user = await getUser(userId, session.tenantId) delete user.password authenticated = true } catch (err) { @@ -75,22 +63,26 @@ module.exports = (noAuthPatterns = [], opts) => { } } const apiKey = ctx.request.headers[Headers.API_KEY] + const tenantId = ctx.request.headers[Headers.TENANT_ID] // this is an internal request, no user made it if (!authenticated && apiKey && apiKey === env.INTERNAL_API_KEY) { authenticated = true internal = true } + if (!user && tenantId) { + user = { tenantId } + } // be explicit if (authenticated !== true) { authenticated = false } // isAuthenticated is a function, so use a variable to be able to check authed state - finalise(ctx, { authenticated, user, internal, version }) + finalise(ctx, { authenticated, user, internal, version, publicEndpoint }) return next() } catch (err) { // allow configuring for public access - if (opts && opts.publicAllowed) { - finalise(ctx, { authenticated: false, version }) + if ((opts && opts.publicAllowed) || publicEndpoint) { + finalise(ctx, { authenticated: false, version, publicEndpoint }) } else { ctx.throw(err.status || 403, err) } diff --git a/packages/auth/src/middleware/index.js b/packages/auth/src/middleware/index.js index 35c7d9c388..689859a139 100644 --- a/packages/auth/src/middleware/index.js +++ b/packages/auth/src/middleware/index.js @@ -4,6 +4,7 @@ const google = require("./passport/google") const oidc = require("./passport/oidc") const authenticated = require("./authenticated") const auditLog = require("./auditLog") +const tenancy = require("./tenancy") module.exports = { google, @@ -12,4 +13,5 @@ module.exports = { local, authenticated, auditLog, + tenancy, } diff --git a/packages/auth/src/middleware/matchers.js b/packages/auth/src/middleware/matchers.js new file mode 100644 index 0000000000..a555823136 --- /dev/null +++ b/packages/auth/src/middleware/matchers.js @@ -0,0 +1,33 @@ +const PARAM_REGEX = /\/:(.*?)(\/.*)?$/g + +exports.buildMatcherRegex = patterns => { + if (!patterns) { + return [] + } + return patterns.map(pattern => { + const isObj = typeof pattern === "object" && pattern.route + const method = isObj ? pattern.method : "GET" + let route = isObj ? pattern.route : pattern + + const matches = route.match(PARAM_REGEX) + if (matches) { + for (let match of matches) { + const pattern = "/.*" + (match.endsWith("/") ? "/" : "") + route = route.replace(match, pattern) + } + } + return { regex: new RegExp(route), method } + }) +} + +exports.matches = (ctx, options) => { + return options.find(({ regex, method }) => { + const urlMatch = regex.test(ctx.request.url) + const methodMatch = + method === "ALL" + ? 
true + : ctx.request.method.toLowerCase() === method.toLowerCase() + + return urlMatch && methodMatch + }) +} diff --git a/packages/auth/src/middleware/passport/google.js b/packages/auth/src/middleware/passport/google.js index 68fe885512..07d6816c0b 100644 --- a/packages/auth/src/middleware/passport/google.js +++ b/packages/auth/src/middleware/passport/google.js @@ -27,13 +27,13 @@ async function authenticate(accessToken, refreshToken, profile, done) { * from couchDB rather than environment variables, using this factory is necessary for dynamically configuring passport. * @returns Dynamically configured Passport Google Strategy */ -exports.strategyFactory = async function (config) { +exports.strategyFactory = async function (config, callbackUrl) { try { - const { clientID, clientSecret, callbackURL } = config + const { clientID, clientSecret } = config - if (!clientID || !clientSecret || !callbackURL) { + if (!clientID || !clientSecret) { throw new Error( - "Configuration invalid. Must contain google clientID, clientSecret and callbackURL" + "Configuration invalid. Must contain google clientID and clientSecret" ) } @@ -41,7 +41,7 @@ exports.strategyFactory = async function (config) { { clientID: config.clientID, clientSecret: config.clientSecret, - callbackURL: config.callbackURL, + callbackURL: callbackUrl, }, authenticate ) diff --git a/packages/auth/src/middleware/passport/local.js b/packages/auth/src/middleware/passport/local.js index 16b53bf894..0db40d64eb 100644 --- a/packages/auth/src/middleware/passport/local.js +++ b/packages/auth/src/middleware/passport/local.js @@ -6,19 +6,23 @@ const { getGlobalUserByEmail } = require("../../utils") const { authError } = require("./utils") const { newid } = require("../../hashing") const { createASession } = require("../../security/sessions") +const { getTenantId } = require("../../tenancy") const INVALID_ERR = "Invalid Credentials" -exports.options = {} +exports.options = { + passReqToCallback: true, +} /** * Passport Local Authentication Middleware. 
- * @param {*} email - username to login with - * @param {*} password - plain text password to log in with - * @param {*} done - callback from passport to return user information and errors + * @param {*} ctx the request structure + * @param {*} email username to login with + * @param {*} password plain text password to log in with + * @param {*} done callback from passport to return user information and errors * @returns The authenticated user, or errors if they occur */ -exports.authenticate = async function (email, password, done) { +exports.authenticate = async function (ctx, email, password, done) { if (!email) return authError(done, "Email Required") if (!password) return authError(done, "Password Required") @@ -35,12 +39,14 @@ exports.authenticate = async function (email, password, done) { // authenticate if (await compare(password, dbUser.password)) { const sessionId = newid() - await createASession(dbUser._id, sessionId) + const tenantId = getTenantId() + await createASession(dbUser._id, { sessionId, tenantId }) dbUser.token = jwt.sign( { userId: dbUser._id, sessionId, + tenantId, }, env.JWT_SECRET ) diff --git a/packages/auth/src/middleware/passport/tests/google.spec.js b/packages/auth/src/middleware/passport/tests/google.spec.js index 30e582a68f..9cc878bba9 100644 --- a/packages/auth/src/middleware/passport/tests/google.spec.js +++ b/packages/auth/src/middleware/passport/tests/google.spec.js @@ -2,8 +2,9 @@ const { data } = require("./utilities/mock-data") +const TENANT_ID = "default" + const googleConfig = { - callbackURL: "http://somecallbackurl", clientID: data.clientID, clientSecret: data.clientSecret, } @@ -26,13 +27,14 @@ describe("google", () => { it("should create successfully create a google strategy", async () => { const google = require("../google") - - await google.strategyFactory(googleConfig) + + const callbackUrl = `/api/global/auth/${TENANT_ID}/google/callback` + await google.strategyFactory(googleConfig, callbackUrl) const expectedOptions = { clientID: googleConfig.clientID, clientSecret: googleConfig.clientSecret, - callbackURL: googleConfig.callbackURL, + callbackURL: callbackUrl, } expect(mockStrategy).toHaveBeenCalledWith( diff --git a/packages/auth/src/middleware/passport/third-party-common.js b/packages/auth/src/middleware/passport/third-party-common.js index 2ab2816391..7c03944232 100644 --- a/packages/auth/src/middleware/passport/third-party-common.js +++ b/packages/auth/src/middleware/passport/third-party-common.js @@ -1,11 +1,12 @@ const env = require("../../environment") const jwt = require("jsonwebtoken") -const database = require("../../db") -const { StaticDatabases, generateGlobalUserID } = require("../../db/utils") +const { generateGlobalUserID } = require("../../db/utils") const { authError } = require("./utils") const { newid } = require("../../hashing") const { createASession } = require("../../security/sessions") const { getGlobalUserByEmail } = require("../../utils") +const { getGlobalDB, getTenantId } = require("../../tenancy") +const fetch = require("node-fetch") /** * Common authentication logic for third parties. e.g. OAuth, OIDC. 
@@ -15,19 +16,21 @@ exports.authenticateThirdParty = async function ( requireLocalAccount = true, done ) { - if (!thirdPartyUser.provider) + if (!thirdPartyUser.provider) { return authError(done, "third party user provider required") - if (!thirdPartyUser.userId) + } + if (!thirdPartyUser.userId) { return authError(done, "third party user id required") - if (!thirdPartyUser.email) + } + if (!thirdPartyUser.email) { return authError(done, "third party user email required") - - const db = database.getDB(StaticDatabases.GLOBAL.name) - - let dbUser + } // use the third party id const userId = generateGlobalUserID(thirdPartyUser.userId) + const db = getGlobalDB() + + let dbUser // try to load by id try { @@ -65,7 +68,7 @@ exports.authenticateThirdParty = async function ( } } - dbUser = syncUser(dbUser, thirdPartyUser) + dbUser = await syncUser(dbUser, thirdPartyUser) // create or sync the user const response = await db.post(dbUser) @@ -73,7 +76,8 @@ exports.authenticateThirdParty = async function ( // authenticate const sessionId = newid() - await createASession(dbUser._id, sessionId) + const tenantId = getTenantId() + await createASession(dbUser._id, { sessionId, tenantId }) dbUser.token = jwt.sign( { @@ -86,10 +90,26 @@ exports.authenticateThirdParty = async function ( return done(null, dbUser) } +async function syncProfilePicture(user, thirdPartyUser) { + const pictureUrl = thirdPartyUser.profile._json.picture + if (pictureUrl) { + const response = await fetch(pictureUrl) + + if (response.status === 200) { + const type = response.headers.get("content-type") + if (type.startsWith("image/")) { + user.pictureUrl = pictureUrl + } + } + } + + return user +} + /** * @returns a user that has been sync'd with third party information */ -function syncUser(user, thirdPartyUser) { +async function syncUser(user, thirdPartyUser) { // provider user.provider = thirdPartyUser.provider user.providerType = thirdPartyUser.providerType @@ -112,6 +132,8 @@ function syncUser(user, thirdPartyUser) { } } + user = await syncProfilePicture(user, thirdPartyUser) + // profile user.thirdPartyProfile = { ...profile._json, diff --git a/packages/auth/src/middleware/tenancy.js b/packages/auth/src/middleware/tenancy.js new file mode 100644 index 0000000000..b80b9a6763 --- /dev/null +++ b/packages/auth/src/middleware/tenancy.js @@ -0,0 +1,14 @@ +const { setTenantId } = require("../tenancy") +const ContextFactory = require("../tenancy/FunctionContext") +const { buildMatcherRegex, matches } = require("./matchers") + +module.exports = (allowQueryStringPatterns, noTenancyPatterns) => { + const allowQsOptions = buildMatcherRegex(allowQueryStringPatterns) + const noTenancyOptions = buildMatcherRegex(noTenancyPatterns) + + return ContextFactory.getMiddleware(ctx => { + const allowNoTenant = !!matches(ctx, noTenancyOptions) + const allowQs = !!matches(ctx, allowQsOptions) + setTenantId(ctx, { allowQs, allowNoTenant }) + }) +} diff --git a/packages/auth/src/security/sessions.js b/packages/auth/src/security/sessions.js index 4051df7123..328f74c794 100644 --- a/packages/auth/src/security/sessions.js +++ b/packages/auth/src/security/sessions.js @@ -12,12 +12,13 @@ function makeSessionID(userId, sessionId) { return `${userId}/${sessionId}` } -exports.createASession = async (userId, sessionId) => { +exports.createASession = async (userId, session) => { const client = await redis.getSessionClient() - const session = { + const sessionId = session.sessionId + session = { createdAt: new Date().toISOString(), lastAccessedAt: new 
Date().toISOString(), - sessionId, + ...session, userId, } await client.store(makeSessionID(userId, sessionId), session, EXPIRY_SECONDS) diff --git a/packages/auth/src/tenancy/FunctionContext.js b/packages/auth/src/tenancy/FunctionContext.js new file mode 100644 index 0000000000..d97a3a30b4 --- /dev/null +++ b/packages/auth/src/tenancy/FunctionContext.js @@ -0,0 +1,73 @@ +const cls = require("cls-hooked") +const { newid } = require("../hashing") + +const REQUEST_ID_KEY = "requestId" + +class FunctionContext { + static getMiddleware(updateCtxFn = null) { + const namespace = this.createNamespace() + + return async function (ctx, next) { + await new Promise( + namespace.bind(function (resolve, reject) { + // store a contextual request ID that can be used anywhere (audit logs) + namespace.set(REQUEST_ID_KEY, newid()) + namespace.bindEmitter(ctx.req) + namespace.bindEmitter(ctx.res) + + if (updateCtxFn) { + updateCtxFn(ctx) + } + next().then(resolve).catch(reject) + }) + ) + } + } + + static run(callback) { + const namespace = this.createNamespace() + + return namespace.runAndReturn(callback) + } + + static setOnContext(key, value) { + const namespace = this.createNamespace() + namespace.set(key, value) + } + + static getContextStorage() { + if (this._namespace && this._namespace.active) { + let contextData = this._namespace.active + delete contextData.id + delete contextData._ns_name + return contextData + } + + return {} + } + + static getFromContext(key) { + const context = this.getContextStorage() + if (context) { + return context[key] + } else { + return null + } + } + + static destroyNamespace() { + if (this._namespace) { + cls.destroyNamespace("session") + this._namespace = null + } + } + + static createNamespace() { + if (!this._namespace) { + this._namespace = cls.createNamespace("session") + } + return this._namespace + } +} + +module.exports = FunctionContext diff --git a/packages/auth/src/tenancy/context.js b/packages/auth/src/tenancy/context.js new file mode 100644 index 0000000000..f3f1f541e9 --- /dev/null +++ b/packages/auth/src/tenancy/context.js @@ -0,0 +1,81 @@ +const env = require("../environment") +const { Headers } = require("../../constants") +const cls = require("./FunctionContext") + +exports.DEFAULT_TENANT_ID = "default" + +exports.isDefaultTenant = () => { + return exports.getTenantId() === exports.DEFAULT_TENANT_ID +} + +exports.isMultiTenant = () => { + return env.MULTI_TENANCY +} + +const TENANT_ID = "tenantId" + +// used for automations, API endpoints should always be in context already +exports.doInTenant = (tenantId, task) => { + return cls.run(() => { + // set the tenant id + cls.setOnContext(TENANT_ID, tenantId) + + // invoke the task + const result = task() + + return result + }) +} + +exports.updateTenantId = tenantId => { + cls.setOnContext(TENANT_ID, tenantId) +} + +exports.setTenantId = ( + ctx, + opts = { allowQs: false, allowNoTenant: false } +) => { + let tenantId + // exit early if not multi-tenant + if (!exports.isMultiTenant()) { + cls.setOnContext(TENANT_ID, this.DEFAULT_TENANT_ID) + return + } + + const allowQs = opts && opts.allowQs + const allowNoTenant = opts && opts.allowNoTenant + const header = ctx.request.headers[Headers.TENANT_ID] + const user = ctx.user || {} + if (allowQs) { + const query = ctx.request.query || {} + tenantId = query.tenantId + } + // override query string (if allowed) by user, or header + // URL params cannot be used in a middleware, as they are + // processed later in the chain + tenantId = user.tenantId || header || 
tenantId + + if (!tenantId && !allowNoTenant) { + ctx.throw(403, "Tenant id not set") + } + // check tenant ID just incase no tenant was allowed + if (tenantId) { + cls.setOnContext(TENANT_ID, tenantId) + } +} + +exports.isTenantIdSet = () => { + const tenantId = cls.getFromContext(TENANT_ID) + return !!tenantId +} + +exports.getTenantId = () => { + if (!exports.isMultiTenant()) { + return exports.DEFAULT_TENANT_ID + } + const tenantId = cls.getFromContext(TENANT_ID) + if (!tenantId) { + throw Error("Tenant id not found") + } + return tenantId +} diff --git a/packages/auth/src/tenancy/index.js b/packages/auth/src/tenancy/index.js new file mode 100644 index 0000000000..2fe257d885 --- /dev/null +++ b/packages/auth/src/tenancy/index.js @@ -0,0 +1,4 @@ +module.exports = { + ...require("./context"), + ...require("./tenancy"), +} diff --git a/packages/auth/src/tenancy/tenancy.js b/packages/auth/src/tenancy/tenancy.js new file mode 100644 index 0000000000..6e18ea7154 --- /dev/null +++ b/packages/auth/src/tenancy/tenancy.js @@ -0,0 +1,105 @@ +const { getDB } = require("../db") +const { SEPARATOR, StaticDatabases } = require("../db/constants") +const { getTenantId, DEFAULT_TENANT_ID, isMultiTenant } = require("./context") +const env = require("../environment") + +const TENANT_DOC = StaticDatabases.PLATFORM_INFO.docs.tenants +const PLATFORM_INFO_DB = StaticDatabases.PLATFORM_INFO.name + +exports.addTenantToUrl = url => { + const tenantId = getTenantId() + + if (isMultiTenant()) { + const char = url.indexOf("?") === -1 ? "?" : "&" + url += `${char}tenantId=${tenantId}` + } + + return url +} + +exports.doesTenantExist = async tenantId => { + const db = getDB(PLATFORM_INFO_DB) + let tenants + try { + tenants = await db.get(TENANT_DOC) + } catch (err) { + // if theres an error the doc doesn't exist, no tenants exist + return false + } + return ( + tenants && + Array.isArray(tenants.tenantIds) && + tenants.tenantIds.indexOf(tenantId) !== -1 + ) +} + +exports.tryAddTenant = async (tenantId, userId, email) => { + const db = getDB(PLATFORM_INFO_DB) + const getDoc = async id => { + if (!id) { + return null + } + try { + return await db.get(id) + } catch (err) { + return { _id: id } + } + } + let [tenants, userIdDoc, emailDoc] = await Promise.all([ + getDoc(TENANT_DOC), + getDoc(userId), + getDoc(email), + ]) + if (!Array.isArray(tenants.tenantIds)) { + tenants = { + _id: TENANT_DOC, + tenantIds: [], + } + } + let promises = [] + if (userIdDoc) { + userIdDoc.tenantId = tenantId + promises.push(db.put(userIdDoc)) + } + if (emailDoc) { + emailDoc.tenantId = tenantId + promises.push(db.put(emailDoc)) + } + if (tenants.tenantIds.indexOf(tenantId) === -1) { + tenants.tenantIds.push(tenantId) + promises.push(db.put(tenants)) + } + await Promise.all(promises) +} + +exports.getGlobalDB = (tenantId = null) => { + // tenant ID can be set externally, for example user API where + // new tenants are being created, this may be the case + if (!tenantId) { + tenantId = getTenantId() + } + + let dbName + + if (tenantId === DEFAULT_TENANT_ID) { + dbName = StaticDatabases.GLOBAL.name + } else { + dbName = `${tenantId}${SEPARATOR}${StaticDatabases.GLOBAL.name}` + } + + return getDB(dbName) +} + +exports.lookupTenantId = async userId => { + const db = getDB(StaticDatabases.PLATFORM_INFO.name) + let tenantId = env.MULTI_TENANCY ? 
DEFAULT_TENANT_ID : null + try { + const doc = await db.get(userId) + if (doc && doc.tenantId) { + tenantId = doc.tenantId + } + } catch (err) { + // just return the default + } + return tenantId +} diff --git a/packages/auth/src/utils.js b/packages/auth/src/utils.js index 6bc1e0e3a6..5936948fd7 100644 --- a/packages/auth/src/utils.js +++ b/packages/auth/src/utils.js @@ -1,14 +1,9 @@ -const { - DocumentTypes, - SEPARATOR, - ViewNames, - StaticDatabases, -} = require("./db/utils") +const { DocumentTypes, SEPARATOR, ViewNames } = require("./db/utils") const jwt = require("jsonwebtoken") const { options } = require("./middleware/passport/jwt") const { createUserEmailView } = require("./db/views") -const { getDB } = require("./db") const { Headers } = require("./constants") +const { getGlobalDB } = require("./tenancy") const APP_PREFIX = DocumentTypes.APP + SEPARATOR @@ -111,7 +106,7 @@ exports.getGlobalUserByEmail = async email => { if (email == null) { throw "Must supply an email address to view" } - const db = getDB(StaticDatabases.GLOBAL.name) + const db = getGlobalDB() try { let users = ( await db.query(`database/${ViewNames.USER_BY_EMAIL}`, { @@ -123,7 +118,7 @@ exports.getGlobalUserByEmail = async email => { return users.length <= 1 ? users[0] : users } catch (err) { if (err != null && err.name === "not_found") { - await createUserEmailView() + await createUserEmailView(db) return exports.getGlobalUserByEmail(email) } else { throw err diff --git a/packages/auth/tenancy.js b/packages/auth/tenancy.js new file mode 100644 index 0000000000..9ca808b74e --- /dev/null +++ b/packages/auth/tenancy.js @@ -0,0 +1 @@ +module.exports = require("./src/tenancy") diff --git a/packages/auth/yarn.lock b/packages/auth/yarn.lock index 8957ecb0fc..b6be8ad1e8 100644 --- a/packages/auth/yarn.lock +++ b/packages/auth/yarn.lock @@ -798,6 +798,13 @@ ast-types@0.9.6: resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9" integrity sha1-ECyenpAF0+fjgpvwxPok7oYu6bk= +async-hook-jl@^1.7.6: + version "1.7.6" + resolved "https://registry.yarnpkg.com/async-hook-jl/-/async-hook-jl-1.7.6.tgz#4fd25c2f864dbaf279c610d73bf97b1b28595e68" + integrity sha512-gFaHkFfSxTjvoxDMYqDuGHlcRyUuamF8s+ZTtJdDzqjws4mCt7v0vuV79/E2Wr2/riMQgtG4/yUtXWs1gZ7JMg== + dependencies: + stack-chain "^1.3.7" + async@~2.1.4: version "2.1.5" resolved "https://registry.yarnpkg.com/async/-/async-2.1.5.tgz#e587c68580994ac67fc56ff86d3ac56bdbe810bc" @@ -1144,6 +1151,15 @@ clone-buffer@1.0.0: resolved "https://registry.yarnpkg.com/clone-buffer/-/clone-buffer-1.0.0.tgz#e3e25b207ac4e701af721e2cb5a16792cac3dc58" integrity sha1-4+JbIHrE5wGvch4staFnksrD3Fg= +cls-hooked@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/cls-hooked/-/cls-hooked-4.2.2.tgz#ad2e9a4092680cdaffeb2d3551da0e225eae1908" + integrity sha512-J4Xj5f5wq/4jAvcdgoGsL3G103BtWpZrMo8NEinRltN+xpTZdI+M38pyQqhuFU/P792xkMFvnKSf+Lm81U1bxw== + dependencies: + async-hook-jl "^1.7.6" + emitter-listener "^1.0.1" + semver "^5.4.1" + cluster-key-slot@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/cluster-key-slot/-/cluster-key-slot-1.1.0.tgz#30474b2a981fb12172695833052bc0d01336d10d" @@ -1444,6 +1460,13 @@ electron-to-chromium@^1.3.723: resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.775.tgz#046517d1f2cea753e06fff549995b9dc45e20082" integrity sha512-EGuiJW4yBPOTj2NtWGZcX93ZE8IGj33HJAx4d3ouE2zOfW2trbWU+t1e0yzLr1qQIw81++txbM3BH52QwSRE6Q== +emitter-listener@^1.0.1: + version "1.1.2" + 
resolved "https://registry.yarnpkg.com/emitter-listener/-/emitter-listener-1.1.2.tgz#56b140e8f6992375b3d7cb2cab1cc7432d9632e8" + integrity sha512-Bt1sBAGFHY9DKY+4/2cV6izcKJUf5T7/gkdmkxzX/qv9CcGH8xSwVRW5mtX03SWJtRTWSOpzCuWN9rBFYZepZQ== + dependencies: + shimmer "^1.2.0" + emittery@^0.7.1: version "0.7.2" resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.7.2.tgz#25595908e13af0f5674ab419396e2fb394cdfa82" @@ -4035,7 +4058,7 @@ saxes@^5.0.1: dependencies: xmlchars "^2.2.0" -"semver@2 || 3 || 4 || 5", semver@^5.5.0, semver@^5.6.0: +"semver@2 || 3 || 4 || 5", semver@^5.4.1, semver@^5.5.0, semver@^5.6.0: version "5.7.1" resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== @@ -4096,6 +4119,11 @@ shellwords@^0.1.1: resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.1.tgz#d6b9181c1a48d397324c84871efbcfc73fc0654b" integrity sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww== +shimmer@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/shimmer/-/shimmer-1.2.1.tgz#610859f7de327b587efebf501fb43117f9aff337" + integrity sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw== + signal-exit@^3.0.0, signal-exit@^3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" @@ -4250,6 +4278,11 @@ sshpk@^1.7.0: safer-buffer "^2.0.2" tweetnacl "~0.14.0" +stack-chain@^1.3.7: + version "1.3.7" + resolved "https://registry.yarnpkg.com/stack-chain/-/stack-chain-1.3.7.tgz#d192c9ff4ea6a22c94c4dd459171e3f00cea1285" + integrity sha1-0ZLJ/06moiyUxN1FkXHj8AzqEoU= + stack-utils@^2.0.2: version "2.0.3" resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.3.tgz#cd5f030126ff116b78ccb3c027fe302713b61277" diff --git a/packages/bbui/package.json b/packages/bbui/package.json index c80e7ed349..6ed8013d34 100644 --- a/packages/bbui/package.json +++ b/packages/bbui/package.json @@ -1,7 +1,7 @@ { "name": "@budibase/bbui", "description": "A UI solution used in the different Budibase projects.", - "version": "0.9.115", + "version": "0.9.116-alpha.0", "license": "AGPL-3.0", "svelte": "src/index.js", "module": "dist/bbui.es.js", @@ -45,7 +45,7 @@ "@spectrum-css/buttongroup": "^3.0.2", "@spectrum-css/checkbox": "^3.0.2", "@spectrum-css/dialog": "^3.0.1", - "@spectrum-css/divider": "^1.0.1", + "@spectrum-css/divider": "^1.0.3", "@spectrum-css/dropzone": "^3.0.2", "@spectrum-css/fieldgroup": "^3.0.2", "@spectrum-css/fieldlabel": "^3.0.1", @@ -65,6 +65,7 @@ "@spectrum-css/search": "^3.0.2", "@spectrum-css/sidenav": "^3.0.2", "@spectrum-css/statuslight": "^3.0.2", + "@spectrum-css/stepper": "^3.0.3", "@spectrum-css/switch": "^1.0.2", "@spectrum-css/table": "^3.0.1", "@spectrum-css/tabs": "^3.0.1", diff --git a/packages/bbui/src/ActionMenu/ActionMenu.svelte b/packages/bbui/src/ActionMenu/ActionMenu.svelte index 45bb65b527..08425e8f59 100644 --- a/packages/bbui/src/ActionMenu/ActionMenu.svelte +++ b/packages/bbui/src/ActionMenu/ActionMenu.svelte @@ -23,8 +23,11 @@ dropdown.show() } - const openMenu = () => { - if (!disabled) show() + const openMenu = event => { + if (!disabled) { + event.stopPropagation() + show() + } } setContext("actionMenu", { show, hide }) diff --git a/packages/bbui/src/ColorPicker/ColorPicker.svelte b/packages/bbui/src/ColorPicker/ColorPicker.svelte 
index 4d248d6190..ff6a292d1b 100644 --- a/packages/bbui/src/ColorPicker/ColorPicker.svelte +++ b/packages/bbui/src/ColorPicker/ColorPicker.svelte @@ -19,7 +19,7 @@ const dispatch = createEventDispatcher() const categories = [ { - label: "Grays", + label: "Theme", colors: [ "gray-50", "gray-75", @@ -72,6 +72,9 @@ "blue-700", "indigo-700", "magenta-700", + + "static-white", + "static-black", ], }, ] @@ -101,9 +104,19 @@ } const getCheckColor = value => { - return /^.*(white|(gray-(50|75|100|200|300|400|500)))\)$/.test(value) - ? "var(--spectrum-global-color-gray-900)" - : "var(--spectrum-global-color-gray-50)" + // Use dynamic color for theme grays + if (value?.includes("gray")) { + return /^.*(gray-(50|75|100|200|300|400|500))\)$/.test(value) + ? "var(--spectrum-global-color-gray-900)" + : "var(--spectrum-global-color-gray-50)" + } + + // Use black check for static white + if (value?.includes("static-black")) { + return "var(--spectrum-global-color-static-gray-50)" + } + + return "var(--spectrum-global-color-static-gray-900)" } diff --git a/packages/bbui/src/Form/Checkbox.svelte b/packages/bbui/src/Form/Checkbox.svelte index 90a2cddda5..6aa88f6dee 100644 --- a/packages/bbui/src/Form/Checkbox.svelte +++ b/packages/bbui/src/Form/Checkbox.svelte @@ -9,6 +9,7 @@ export let text = null export let disabled = false export let error = null + export let size = "M" const dispatch = createEventDispatcher() const onChange = e => { @@ -18,5 +19,5 @@ - + diff --git a/packages/bbui/src/Form/Core/Checkbox.svelte b/packages/bbui/src/Form/Core/Checkbox.svelte index e9a6b56fd9..ca3cef383f 100644 --- a/packages/bbui/src/Form/Core/Checkbox.svelte +++ b/packages/bbui/src/Form/Core/Checkbox.svelte @@ -8,15 +8,18 @@ export let id = null export let text = null export let disabled = false + export let size const dispatch = createEventDispatcher() const onChange = event => { dispatch("change", event.target.checked) } + + $: sizeClass = `spectrum-Checkbox--size${size || "M"}`
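For the bbui Checkbox change just above, the new size prop only feeds a Spectrum CSS modifier class. Roughly, per the reactive statement in Core/Checkbox.svelte (output values shown for illustration):

// sizeClass derivation as added in Core/Checkbox.svelte
const sizeClassFor = size => `spectrum-Checkbox--size${size || "M"}`

sizeClassFor("S") // "spectrum-Checkbox--sizeS"
sizeClassFor()    // "spectrum-Checkbox--sizeM" (default)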
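Returning to the tenancy plumbing earlier in this diff: the new middleware factory, doInTenant and getGlobalDB are intended to compose roughly as below. This is a sketch under several assumptions, not code from the patch: the Koa app, the matcher pattern shape, the routes, and the "@budibase/auth" require paths are all illustrative.

const Koa = require("koa")
// illustrative require paths; the patch only adds the modules themselves
const tenancy = require("@budibase/auth/tenancy")
const tenancyMiddleware = require("@budibase/auth/src/middleware/tenancy")

const app = new Koa()

// first argument: routes allowed to pass ?tenantId= in the query string,
// second argument: routes that may run with no tenant at all
// (pattern shape is an assumption about what buildMatcherRegex accepts)
app.use(
  tenancyMiddleware(
    [{ route: "/api/global/auth", method: "GET" }],
    [{ route: "/health", method: "GET" }]
  )
)

// outside a request (e.g. an automation), wrap work in doInTenant so that
// getTenantId() and getGlobalDB() resolve against the intended tenant
tenancy.doInTenant("default", () => {
  // the default tenant maps to the shared global DB; any other tenant id
  // resolves to a tenant-prefixed global DB name
  const db = tenancy.getGlobalDB()
  return db
})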