Merge remote-tracking branch 'origin/master' into feature/monolith-js-refactor

Commit: 30db237071
Author: Dean
Date: 2024-01-17 17:17:01 +00:00
745 changed files with 18538 additions and 9802 deletions


@@ -1,194 +0,0 @@
{
"files": [
"README.md"
],
"imageSize": 100,
"commit": false,
"contributors": [
{
"login": "shogunpurple",
"name": "Martin McKeaveney",
"avatar_url": "https://avatars1.githubusercontent.com/u/11256663?v=4",
"profile": "http://martinmck.com",
"contributions": [
"code",
"doc",
"test",
"infra"
]
},
{
"login": "mike12345567",
"name": "Michael Drury",
"avatar_url": "https://avatars2.githubusercontent.com/u/4407001?v=4",
"profile": "http://www.michaeldrury.co.uk/",
"contributions": [
"doc",
"code",
"test",
"infra"
]
},
{
"login": "aptkingston",
"name": "Andrew Kingston",
"avatar_url": "https://avatars3.githubusercontent.com/u/9075550?v=4",
"profile": "https://github.com/aptkingston",
"contributions": [
"doc",
"code",
"test",
"design"
]
},
{
"login": "mjashanks",
"name": "Michael Shanks",
"avatar_url": "https://avatars3.githubusercontent.com/u/3524181?v=4",
"profile": "https://budibase.com/",
"contributions": [
"doc",
"code",
"test"
]
},
{
"login": "kevmodrome",
"name": "Kevin Åberg Kultalahti",
"avatar_url": "https://avatars3.githubusercontent.com/u/534488?v=4",
"profile": "https://github.com/kevmodrome",
"contributions": [
"doc",
"code",
"test"
]
},
{
"login": "joebudi",
"name": "Joe",
"avatar_url": "https://avatars2.githubusercontent.com/u/49767913?v=4",
"profile": "https://www.budibase.com/",
"contributions": [
"doc",
"code",
"content",
"design"
]
},
{
"login": "Rory-Powell",
"name": "Rory Powell",
"avatar_url": "https://avatars.githubusercontent.com/u/8755148?v=4",
"profile": "https://github.com/Rory-Powell",
"contributions": [
"code",
"doc",
"test"
]
},
{
"login": "PClmnt",
"name": "Peter Clement",
"avatar_url": "https://avatars.githubusercontent.com/u/5665926?v=4",
"profile": "https://github.com/PClmnt",
"contributions": [
"code",
"doc",
"test"
]
},
{
"login": "Conor-Mack",
"name": "Conor_Mack",
"avatar_url": "https://avatars1.githubusercontent.com/u/36074859?v=4",
"profile": "https://github.com/Conor-Mack",
"contributions": [
"code",
"test"
]
},
{
"login": "pngwn",
"name": "pngwn",
"avatar_url": "https://avatars1.githubusercontent.com/u/12937446?v=4",
"profile": "https://github.com/pngwn",
"contributions": [
"code",
"test"
]
},
{
"login": "HugoLd",
"name": "HugoLd",
"avatar_url": "https://avatars0.githubusercontent.com/u/26521848?v=4",
"profile": "https://github.com/HugoLd",
"contributions": [
"code"
]
},
{
"login": "victoriasloan",
"name": "victoriasloan",
"avatar_url": "https://avatars.githubusercontent.com/u/9913651?v=4",
"profile": "https://github.com/victoriasloan",
"contributions": [
"code"
]
},
{
"login": "yashank09",
"name": "yashank09",
"avatar_url": "https://avatars.githubusercontent.com/u/37672190?v=4",
"profile": "https://github.com/yashank09",
"contributions": [
"code"
]
},
{
"login": "SOVLOOKUP",
"name": "SOVLOOKUP",
"avatar_url": "https://avatars.githubusercontent.com/u/53158137?v=4",
"profile": "https://github.com/SOVLOOKUP",
"contributions": [
"code"
]
},
{
"login": "seoulaja",
"name": "seoulaja",
"avatar_url": "https://avatars.githubusercontent.com/u/15101654?v=4",
"profile": "https://github.com/seoulaja",
"contributions": [
"translation"
]
},
{
"login": "mslourens",
"name": "Maurits Lourens",
"avatar_url": "https://avatars.githubusercontent.com/u/1907152?v=4",
"profile": "https://github.com/mslourens",
"contributions": [
"test",
"code"
]
},
{
"login": "Rory-Powell",
"name": "Rory Powell",
"avatar_url": "https://avatars.githubusercontent.com/u/8755148?v=4",
"profile": "https://github.com/Rory-Powell",
"contributions": [
"infra",
"test",
"code"
]
}
],
"contributorsPerLine": 7,
"projectName": "budibase",
"projectOwner": "Budibase",
"repoType": "github",
"repoHost": "https://github.com",
"skipCi": true,
"commitConvention": "none"
}


@@ -8,3 +8,6 @@ packages/backend-core/coverage
 packages/server/client
 packages/builder/.routify
 packages/sdk/sdk
+packages/account-portal/packages/server/build
+packages/account-portal/packages/ui/.routify
+packages/account-portal/packages/ui/build


@@ -19,6 +19,7 @@
     "bundle.js"
   ],
   "extends": ["eslint:recommended"],
+  "plugins": ["import", "eslint-plugin-local-rules"],
   "overrides": [
     {
       "files": ["**/*.svelte"],
@@ -30,7 +31,6 @@
         "sourceType": "module",
         "allowImportExportEverywhere": true
       }
     },
     {
       "files": ["**/*.ts"],
@@ -42,13 +42,25 @@
         "no-case-declarations": "off",
         "no-useless-escape": "off",
         "no-undef": "off",
-        "no-prototype-builtins": "off"
+        "no-prototype-builtins": "off",
+        "local-rules/no-budibase-imports": "error"
       }
     }
   ],
   "rules": {
     "no-self-assign": "off",
-    "no-unused-vars": ["error", { "varsIgnorePattern": "^_", "argsIgnorePattern": "^_", "destructuredArrayIgnorePattern": "^_" }]
+    "no-unused-vars": [
+      "error",
+      {
+        "varsIgnorePattern": "^_",
+        "argsIgnorePattern": "^_",
+        "destructuredArrayIgnorePattern": "^_"
+      }
+    ],
+    "import/no-relative-packages": "error",
+    "import/export": "error",
+    "import/no-duplicates": "error",
+    "import/newline-after-import": "error"
   },
   "globals": {
     "GeolocationPositionError": true


@@ -1,139 +1,45 @@
 # Budibase CI Pipelines
-Welcome to the budibase CI pipelines directory. This document details what each of the CI pipelines are for, and some common combinations.
+Welcome to the Budibase CI pipelines directory. This document details what each of the CI pipelines are for, and some common combinations.
 ## All CI Pipelines
-### Note
-- When running workflow dispatch jobs, ensure you always run them off the `master` branch. It defaults to `develop`, so double check before running any jobs. The exception to this case is the `deploy-release` job which requires the develop branch.
 ### Standard CI Build Job (budibase_ci.yml)
 Triggers:
-- PR or push to develop
 - PR or push to master
-The standard CI Build job is what runs when you raise a PR to develop or master.
+The standard CI Build job is what runs when you raise a PR to master.
 - Installs all dependencies,
 - builds the project
 - run the unit tests
 - Generate test coverage metrics with codecov
 - Run the integration tests
+- Check that the pro and account portal submodules are pointing to the latest master head
-### Release Develop Job (release-develop.yml)
+### Release Job (tag-release.yml)
 Triggers:
-- Push to develop
+- Manually triggered
-The job responsible for building, tagging and pushing docker images out to the test and release environments.
+This job is responsible for building and pushing all the production services, packages and images. This is done via [budibase-deploys](https://github.com/Budibase/budibase-deploys/actions/workflows/release.yml).
-- Installs all dependencies
+An input is required, indicating if the new version will be a `patch`, `minor` or `major` bump.
-- builds the project
-- run the unit tests
-- publish the budibase JS packages under a prerelease tag to NPM
-- build, tag and push docker images under the `develop` tag to docker hub
-These images will then be pulled by the test and release environments, updating the latest automatically. Discord notifications are sent to the #infra channel when this occurs.
+More documentation can be found here: https://budibase.atlassian.net/wiki/spaces/DEVOPS/pages/347930625/Production+release
-### Release Job (release.yml)
-Triggers:
-- Push to master
-This job is responsible for building and pushing the latest code to NPM and docker hub, so that it can be deployed.
-- Installs all dependencies
-- builds the project
-- run the unit tests
-- publish the budibase JS packages under a release tag to NPM (always incremented by patch versions)
-- build, tag and push docker images under the `v.x.x.x` (the tag of the NPM release) tag to docker hub
-### Release Selfhost Job (release-selfhost.yml)
-Triggers:
-- Manual Workflow Dispatch Trigger
-This job is responsible for delivering the latest version of budibase to those that are self-hosting.
-This job relies on the release job to have run first, so the latest image is pushed to dockerhub. This job then will pull the latest version from `lerna.json` and try to find an image in dockerhub corresponding to that version. For example, if the version in `lerna.json` is `1.0.0`:
-- Pull the images for all budibase services tagged `v1.0.0` from dockerhub
-- Tag these images as `latest`
-- Push them back to dockerhub. This now means anyone who pulls `latest` (self hosters using docker-compose) will get the latest version.
-- Build and release the budibase helm chart for kubernetes users
-- Perform a github release with the latest version. You can see previous releases here (https://github.com/Budibase/budibase/releases)
-### Deploy Release (deploy-release.yml)
-Triggers:
-- Manual Workflow Dispatch Trigger
-This job is responsible for deploying to our release, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. After kicking off this job, the following will occur:
-- Checks out the release branch
-- Pulls the latest `values.yaml` from budibase infra, a private repo containing Budibase's infrastructure configuration
-- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off
-- Configures AWS Credentials
-- Deploys the helm chart in the budibase repo to our preproduction EKS cluster, injecting the `values.yaml` we pulled from budibase-infra
-- Fires off a discord webhook in the #infra channel to show that the deployment completed successfully.
-### Deploy Preprod (deploy-preprod.yml)
-Triggers:
-- Manual Workflow Dispatch Trigger
-This job is responsible for deploying to our preprod, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. After kicking off this job, the following will occur:
-- Checks out the master branch
-- Pulls the latest `values.yaml` from budibase infra, a private repo containing Budibase's infrastructure configuration
-- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off
-- Configures AWS Credentials
-- Deploys the helm chart in the budibase repo to our preprod EKS cluster, injecting the `values.yaml` we pulled from budibase-infra
-- Fires off a discord webhook in the #infra channel to show that the deployment completed successfully.
-### Deploy Production (deploy-cloud.yml)
-Triggers:
-- Manual Workflow Dispatch Trigger
-This job is responsible for deploying to our production, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. You can also manually enter a version number for this job, so you can perform rollbacks or upgrade to a specific version. After kicking off this job, the following will occur:
-- Checks out the master branch
-- Pulls the latest `values.yaml` from budibase infra, a private repo containing Budibase's infrastructure configuration
-- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off
-- Configures AWS Credentials
-- Deploys the helm chart in the budibase repo to our production EKS cluster, injecting the `values.yaml` we pulled from budibase-infra
-- Fires off a discord webhook in the #infra channel to show that the deployment completed successfully.
 ## Common Workflows
 ### Deploy Changes to Production (Release)
-- Merge `develop` into `master`
+- Merge your changes into `master`
-- Wait for budibase CI job and release job to run
+- Run `tag-release.yml`
-- Run cloud deploy job
+- Check the progress in [budibase-deploys](https://github.com/Budibase/budibase-deploys/actions/workflows/release.yml)
-- Run release selfhost job
-### Deploy Changes to Production (Hotfix)
-- Branch off `master`
-- Perform your hotfix
-- Merge back into `master`
-- Wait for budibase CI job and release job to run
-- Run cloud deploy job
-- Run release selfhost job
 ### Rollback A Bad Cloud Deployment
-- Kick off cloud deploy job
+Rollback documentation can be found here:
-- Ensure you are running off master
+https://budibase.atlassian.net/wiki/spaces/DEVOPS/pages/347930625/Production+release#Rollback
-- Enter the version number of the last known good version of budibase. For example `1.0.0`
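The `tag-release.yml` workflow itself is not part of this diff, but for context, the `patch`/`minor`/`major` choice described above is the kind of thing a manually dispatched workflow declares as a `workflow_dispatch` input. A minimal sketch, with an assumed input name, might look like this:

```yaml
# Illustrative sketch only - the real tag-release.yml is not shown in this commit,
# and the input name "versioning" is assumed for the example.
name: Tag release
on:
  workflow_dispatch:
    inputs:
      versioning:
        description: The version bump to apply (patch, minor or major)
        type: choice
        default: patch
        options:
          - patch
          - minor
          - major
```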


@@ -12,6 +12,13 @@ on:
       - master
   pull_request:
   workflow_dispatch:
+  workflow_call:
+    inputs:
+      run_as_oss:
+        type: boolean
+        required: false
+        description: Force running checks as if it was an OSS contributor
+        default: false
 env:
   BRANCH: ${{ github.event.pull_request.head.ref }}
@@ -19,50 +26,41 @@ env:
   PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
   NX_BASE_BRANCH: origin/${{ github.base_ref }}
   USE_NX_AFFECTED: ${{ github.event_name == 'pull_request' }}
+  IS_OSS_CONTRIBUTOR: ${{ inputs.run_as_oss == true || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase') }}
 jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout repo and submodules
+      - name: Checkout repo
         uses: actions/checkout@v3
-        if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
         with:
-          submodules: true
+          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
-      - name: Checkout repo only
-        uses: actions/checkout@v3
-        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase'
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
         uses: actions/setup-node@v3
         with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       - run: yarn lint
   build:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout repo and submodules
+      - name: Checkout repo
         uses: actions/checkout@v3
-        if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
         with:
-          submodules: true
+          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
           fetch-depth: 0
-      - name: Checkout repo only
-        uses: actions/checkout@v3
-        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase'
-        with:
-          fetch-depth: 0
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
        uses: actions/setup-node@v3
        with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       # Run build all the projects
@@ -78,27 +76,33 @@ jobs:
             yarn check:types
           fi
-  test-libraries:
+  helm-lint:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout repo and submodules
+      - name: Checkout repo
         uses: actions/checkout@v3
-        if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
-        with:
-          submodules: true
-          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
-          fetch-depth: 0
-      - name: Checkout repo only
-        uses: actions/checkout@v3
-        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase'
         with:
           fetch-depth: 0
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
+        uses: azure/setup-helm@v3
+      - run: cd charts/budibase && helm lint .
+  test-libraries:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v3
+        with:
+          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
+          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
+          fetch-depth: 0
+      - name: Use Node.js 20.x
         uses: actions/setup-node@v3
         with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       - name: Test
         run: |
@@ -107,33 +111,22 @@
           else
             yarn test --ignore=@budibase/worker --ignore=@budibase/server --ignore=@budibase/pro
           fi
-      - uses: codecov/codecov-action@v3
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
-          name: codecov-umbrella
-          verbose: true
   test-worker:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout repo and submodules
+      - name: Checkout repo
         uses: actions/checkout@v3
-        if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
         with:
-          submodules: true
+          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
           fetch-depth: 0
-      - name: Checkout repo only
-        uses: actions/checkout@v3
-        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase'
-        with:
-          fetch-depth: 0
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
         uses: actions/setup-node@v3
         with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       - name: Test worker
         run: |
@@ -143,33 +136,21 @@
             yarn test --scope=@budibase/worker
           fi
-      - uses: codecov/codecov-action@v3
-        with:
-          token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos
-          name: codecov-umbrella
-          verbose: true
   test-server:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout repo and submodules
+      - name: Checkout repo
         uses: actions/checkout@v3
-        if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
         with:
-          submodules: true
+          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
           fetch-depth: 0
-      - name: Checkout repo only
-        uses: actions/checkout@v3
-        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase'
-        with:
-          fetch-depth: 0
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
         uses: actions/setup-node@v3
         with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       - name: Test server
         run: |
@@ -179,12 +160,6 @@
             yarn test --scope=@budibase/server
           fi
-      - uses: codecov/codecov-action@v3
-        with:
-          token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos
-          name: codecov-umbrella
-          verbose: true
   test-pro:
     runs-on: ubuntu-latest
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
@@ -196,11 +171,11 @@
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
           fetch-depth: 0
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
         uses: actions/setup-node@v3
         with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       - name: Test
         run: |
@@ -213,24 +188,23 @@
   integration-test:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout repo and submodules
+      - name: Checkout repo
         uses: actions/checkout@v3
-        if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
         with:
-          submodules: true
+          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
-      - name: Checkout repo only
-        uses: actions/checkout@v3
-        if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase'
-      - name: Use Node.js 18.x
+      - name: Use Node.js 20.x
         uses: actions/setup-node@v3
         with:
-          node-version: 18.x
-          cache: "yarn"
+          node-version: 20.x
+          cache: yarn
       - run: yarn --frozen-lockfile
       - name: Build packages
         run: yarn build --scope @budibase/server --scope @budibase/worker
+      - name: Build backend-core for OSS contributor (required for pro)
+        if: ${{ env.IS_OSS_CONTRIBUTOR == 'true' }}
+        run: yarn build --scope @budibase/backend-core
       - name: Run tests
         run: |
           cd qa-core
@@ -242,7 +216,7 @@
   check-pro-submodule:
     runs-on: ubuntu-latest
-    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'
+    if: inputs.run_as_oss != true && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase')
     steps:
       - name: Checkout repo and submodules
         uses: actions/checkout@v3
@@ -284,7 +258,57 @@
             if (submoduleCommit !== baseCommit) {
              console.error('Submodule commit does not match the latest commit on the "${{ steps.get_pro_commits.outputs.target_branch }}" branch.');
-             console.error('Refer to the pro repo to merge your changes: https://github.com/Budibase/budibase-pro/blob/develop/docs/getting_started.md')
+             console.error('Refer to the pro repo to merge your changes: https://github.com/Budibase/budibase-pro/blob/master/docs/getting_started.md')
              process.exit(1);
            } else {
              console.log('All good, the submodule had been merged and setup correctly!')
            }
+  check-accountportal-submodule:
+    runs-on: ubuntu-latest
+    if: inputs.run_as_oss != true && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase')
+    steps:
+      - name: Checkout repo and submodules
+        uses: actions/checkout@v3
+        with:
+          submodules: true
+          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
+          fetch-depth: 0
+      - name: Check account portal commit
+        id: get_accountportal_commits
+        run: |
+          cd packages/account-portal
+          accountportal_commit=$(git rev-parse HEAD)
+          branch="${{ github.base_ref || github.ref_name }}"
+          echo "Running on branch '$branch' (base_ref=${{ github.base_ref }}, ref_name=${{ github.head_ref }})"
+          base_commit=$(git rev-parse origin/master)
+          if [[ ! -z $base_commit ]]; then
+            echo "target_branch=$branch"
+            echo "target_branch=$branch" >> "$GITHUB_OUTPUT"
+            echo "accountportal_commit=$accountportal_commit"
+            echo "accountportal_commit=$accountportal_commit" >> "$GITHUB_OUTPUT"
+            echo "base_commit=$base_commit"
+            echo "base_commit=$base_commit" >> "$GITHUB_OUTPUT"
+          else
+            echo "Nothing to do - branch to branch merge."
+          fi
+      - name: Check submodule merged to base branch
+        if: ${{ steps.get_accountportal_commits.outputs.base_commit != '' }}
+        uses: actions/github-script@v4
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const submoduleCommit = '${{ steps.get_accountportal_commits.outputs.accountportal_commit }}';
+            const baseCommit = '${{ steps.get_accountportal_commits.outputs.base_commit }}';
+            if (submoduleCommit !== baseCommit) {
+              console.error('Submodule commit does not match the latest commit on the "${{ steps.get_accountportal_commits.outputs.target_branch }}" branch.');
+              console.error('Refer to the account portal repo to merge your changes: https://github.com/Budibase/account-portal/blob/master/docs/index.md')
+              process.exit(1);
+            } else {
+              console.log('All good, the submodule had been merged and setup correctly!')
+            }


@@ -0,0 +1,35 @@
name: OSS contributor checks
on:
workflow_dispatch:
schedule:
- cron: "0 8,16 * * 1-5" # on weekdays at 8am and 4pm
jobs:
run-checks:
name: Publish server and worker docker images
uses: ./.github/workflows/budibase_ci.yml
with:
run_as_oss: true
secrets: inherit
notify-error:
needs: ["run-checks"]
if: ${{ failure() }}
name: Notify error
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set commit SHA
id: set_sha
run: echo "::set-output name=sha::$(git rev-parse --short ${{ github.sha }})"
- name: Notify error
uses: tsickert/discord-webhook@v5.3.0
with:
webhook-url: ${{ secrets.OSS_CHECKS_WEBHOOK_URL }}
embed-title: 🚨 OSS checks failed in master
embed-url: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
embed-description: |
Git sha: `${{ steps.set_sha.outputs.sha }}`


@@ -2,9 +2,7 @@ name: close-featurebranch
 on:
   pull_request:
-    types: [closed]
-    branches:
-      - master
+    types: [closed, unlabeled]
   workflow_dispatch:
     inputs:
       BRANCH:
@@ -14,6 +12,9 @@ on:
 jobs:
   release:
+    if: |
+      (github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'feature-branch')) ||
+      github.event.label.name == 'feature-branch'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3


@@ -1,48 +0,0 @@
name: Budibase Deploy Production
on:
workflow_dispatch:
inputs:
version:
description: Budibase release version. For example - 1.0.0
required: false
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Fail if not a tag
run: |
if [[ $GITHUB_REF != refs/tags/* ]]; then
echo "Workflow Dispatch can only be run on tags"
exit 1
fi
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Fail if tag is not in master
run: |
if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then
echo "Tag is not in master. This pipeline can only execute tags that are present on the master branch"
exit 1
fi
- name: Get the latest budibase release version
id: version
run: |
if [ -z "${{ github.event.inputs.version }}" ]; then
release_version=$(cat lerna.json | jq -r '.version')
else
release_version=${{ github.event.inputs.version }}
fi
echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
- uses: passeidireto/trigger-external-workflow-action@main
env:
PAYLOAD_VERSION: ${{ env.RELEASE_VERSION }}
with:
repository: budibase/budibase-deploys
event: budicloud-prod-deploy
github_pat: ${{ secrets.GH_ACCESS_TOKEN }}


@@ -2,11 +2,19 @@ name: deploy-featurebranch
 on:
   pull_request:
-    branches:
-      - master
+    types: [
+      labeled,
+      # default types below (https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request)
+      opened,
+      synchronize,
+      reopened,
+    ]
 jobs:
   release:
+    if: |
+      (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') &&
+      contains(github.event.pull_request.labels.*.name, 'feature-branch')
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3

.github/workflows/force-release.yml (new file)

@@ -0,0 +1,46 @@
name: Forced release
concurrency:
group: tag-release
cancel-in-progress: false
on:
workflow_dispatch:
jobs:
ensure-is-master-tag:
name: Ensure is a master tag
runs-on: qa-arc-runner-set
steps:
- name: Checkout monorepo
uses: actions/checkout@v4
with:
token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
fetch-tags: true
fetch-depth: 0
- name: Fail if ref is not a tag
run: |
if ! git show-ref -q --verify "refs/tags/${{ github.ref_name }}" 2>/dev/null; then
echo "'${{ github.ref_name }}' is not a valid tag."
exit 1
fi
- name: Fail if tag is not in master
run: |
if ! git merge-base --is-ancestor ${{ github.ref_name }} origin/master; then
echo "Tag is not in master. Release can only execute tags that are present on the master branch"
exit 1
fi
trigger-release:
needs: [ensure-is-master-tag]
runs-on: ubuntu-latest
steps:
- uses: peter-evans/repository-dispatch@v2
with:
repository: budibase/budibase-deploys
event-type: release-prod
token: ${{ secrets.GH_ACCESS_TOKEN }}
client-payload: |-
{
"TAG": "${{ github.ref_name }}"
}
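For context on the `repository-dispatch` step above: the `client-payload` travels with the `release-prod` event and is read on the receiving side from `github.event.client_payload`. A minimal sketch of a consumer, assuming a hypothetical workflow in budibase-deploys (the real one is private and not part of this diff):

```yaml
# Hypothetical receiving workflow - for illustration only.
name: Release prod
on:
  repository_dispatch:
    types: [release-prod]
jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      # The tag sent by force-release.yml arrives in the event's client payload.
      - run: echo "Deploying tag ${{ github.event.client_payload.TAG }}"
```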


@@ -1,178 +0,0 @@
name: Budibase Release
concurrency:
group: release
cancel-in-progress: false
on:
push:
tags:
- "[0-9]+.[0-9]+.[0-9]+"
# Exclude all pre-releases
- "!*[0-9]+.[0-9]+.[0-9]+-*"
env:
# Posthog token used by ui at build time
POSTHOG_TOKEN: phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU
INTERCOM_TOKEN: ${{ secrets.INTERCOM_TOKEN }}
PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
jobs:
release-images:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
submodules: true
token: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
fetch-depth: 0
- name: Fail if tag is not in master
run: |
if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then
echo "Tag is not in master. This pipeline can only execute tags that are present on the master branch"
exit 1
fi
- uses: actions/setup-node@v1
with:
node-version: 18.x
cache: yarn
- run: yarn install --frozen-lockfile
- name: Update versions
run: ./scripts/updateVersions.sh
- run: yarn lint
- run: yarn build
- run: yarn build:sdk
- name: Publish budibase packages to NPM
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
run: |
# setup the username and email. I tend to use 'GitHub Actions Bot' with no email by default
git config --global user.name "Budibase Release Bot"
git config --global user.email "<>"
git submodule foreach git commit -a -m 'Release process'
git commit -a -m 'Release process'
echo //registry.npmjs.org/:_authToken=${NPM_TOKEN} >> .npmrc
yarn release
- name: "Get Current tag"
id: currenttag
run: |
version=$(./scripts/getCurrentVersion.sh)
echo "Using tag $version"
echo "version=$version" >> "$GITHUB_OUTPUT"
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Docker login
run: |
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
env:
DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
- name: Build worker docker
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
build-args: |
BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }}
tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}
file: ./packages/worker/Dockerfile.v2
cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest
cache-to: type=inline
env:
IMAGE_NAME: budibase/worker
IMAGE_TAG: ${{ steps.currenttag.outputs.version }}
BUDIBASE_VERSION: ${{ steps.currenttag.outputs.version }}
- name: Build server docker
uses: docker/build-push-action@v5
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
build-args: |
BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }}
tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}
file: ./packages/server/Dockerfile.v2
cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest
cache-to: type=inline
env:
IMAGE_NAME: budibase/apps
IMAGE_TAG: ${{ steps.currenttag.outputs.version }}
BUDIBASE_VERSION: ${{ steps.currenttag.outputs.version }}
- name: Build proxy docker
uses: docker/build-push-action@v5
with:
context: ./hosting/proxy
push: true
platforms: linux/amd64,linux/arm64
tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }}
file: ./hosting/proxy/Dockerfile
cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest
cache-to: type=inline
env:
IMAGE_NAME: budibase/proxy
IMAGE_TAG: ${{ steps.currenttag.outputs.version }}
release-helm-chart:
needs: [release-images]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Helm
uses: azure/setup-helm@v1
id: helm-install
- name: Get the latest budibase release version
id: version
run: |
release_version=$(cat lerna.json | jq -r '.version')
echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
# due to helm repo index issue: https://github.com/helm/helm/issues/7363
# we need to create new package in a different dir, merge the index and move the package back
- name: Build and release helm chart
run: |
git config user.name "Budibase Helm Bot"
git config user.email "<>"
git reset --hard
git fetch
mkdir sync
echo "Packaging chart to sync dir"
helm package charts/budibase --version 0.0.0-master --app-version "$RELEASE_VERSION" --destination sync
echo "Packaging successful"
git checkout gh-pages
echo "Indexing helm repo"
helm repo index --merge docs/index.yaml sync
mv -f sync/* docs
rm -rf sync
echo "Pushing new helm release"
git add -A
git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}"
git push
trigger-deploy-to-qa-env:
needs: [release-helm-chart]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: peter-evans/repository-dispatch@v2
with:
repository: budibase/budibase-deploys
event-type: budicloud-qa-deploy
token: ${{ secrets.GH_ACCESS_TOKEN }}
client-payload: |-
{
"VERSION": "${{ github.ref_name }}",
"REF_NAME": "${{ github.ref_name}}"
}


@@ -1,125 +0,0 @@
name: Budibase Release Selfhost
on:
workflow_dispatch:
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Fail if not a tag
run: |
if [[ $GITHUB_REF != refs/tags/* ]]; then
echo "Workflow Dispatch can only be run on tags"
exit 1
fi
- uses: actions/checkout@v2
with:
submodules: true
token: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
fetch-depth: 0
- name: Fail if tag is not in master
run: |
if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then
echo "Tag is not in master. This pipeline can only execute tags that are present on the master branch"
exit 1
fi
- name: Use Node.js 18.x
uses: actions/setup-node@v1
with:
node-version: 18.x
- name: Get the latest budibase release version
id: version
run: |
release_version=$(cat lerna.json | jq -r '.version')
echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
- name: Tag and release Docker images (Self Host)
run: |
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
release_tag=${{ env.RELEASE_VERSION }}
# Pull apps and worker images
docker pull budibase/apps:$release_tag
docker pull budibase/worker:$release_tag
docker pull budibase/proxy:$release_tag
# Tag apps and worker images
docker tag budibase/apps:$release_tag budibase/apps:$SELFHOST_TAG
docker tag budibase/worker:$release_tag budibase/worker:$SELFHOST_TAG
docker tag budibase/proxy:$release_tag budibase/proxy:$SELFHOST_TAG
# Push images
docker push budibase/apps:$SELFHOST_TAG
docker push budibase/worker:$SELFHOST_TAG
docker push budibase/proxy:$SELFHOST_TAG
env:
DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
SELFHOST_TAG: latest
- name: Bootstrap and build (CLI)
run: |
yarn
yarn build
- name: Build OpenAPI spec
run: |
pushd packages/server
yarn
yarn specs
popd
- name: Setup Helm
uses: azure/setup-helm@v1
id: helm-install
# due to helm repo index issue: https://github.com/helm/helm/issues/7363
# we need to create new package in a different dir, merge the index and move the package back
- name: Build and release helm chart
run: |
git config user.name "Budibase Helm Bot"
git config user.email "<>"
git reset --hard
git fetch
mkdir sync
echo "Packaging chart to sync dir"
helm package charts/budibase --version "$RELEASE_VERSION" --app-version "$RELEASE_VERSION" --destination sync
echo "Packaging successful"
git checkout gh-pages
echo "Indexing helm repo"
helm repo index --merge docs/index.yaml sync
mv -f sync/* docs
rm -rf sync
echo "Pushing new helm release"
git add -A
git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}"
git push
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Perform Github Release
uses: softprops/action-gh-release@v1
with:
name: ${{ env.RELEASE_VERSION }}
tag_name: ${{ env.RELEASE_VERSION }}
generate_release_notes: true
files: |
packages/cli/build/cli-win.exe
packages/cli/build/cli-linux
packages/cli/build/cli-macos
packages/server/specs/openapi.yaml
packages/server/specs/openapi.json
- name: Discord Webhook Action
uses: tsickert/discord-webhook@v4.0.0
with:
webhook-url: ${{ secrets.PROD_DEPLOY_WEBHOOK_URL }}
content: "Self Host Deployment Complete: ${{ env.RELEASE_VERSION }} deployed to Self Host."
embed-title: ${{ env.RELEASE_VERSION }}


@@ -1,86 +0,0 @@
name: Deploy Budibase Single Container Image to DockerHub
on:
workflow_dispatch:
env:
CI: true
PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
REGISTRY_URL: registry.hub.docker.com
jobs:
build:
name: "build"
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [18.x]
steps:
- name: Maximize build space
uses: easimon/maximize-build-space@master
with:
root-reserve-mb: 30000
swap-size-mb: 1024
remove-android: "true"
remove-dotnet: "true"
- name: Fail if not a tag
run: |
if [[ $GITHUB_REF != refs/tags/* ]]; then
echo "Workflow Dispatch can only be run on tags"
exit 1
fi
- name: "Checkout"
uses: actions/checkout@v2
with:
submodules: true
token: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
- name: Setup QEMU
uses: docker/setup-qemu-action@v1
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Run Yarn
run: yarn
- name: Update versions
run: ./scripts/updateVersions.sh
- name: Run Yarn Build
run: yarn build
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_API_KEY }}
- name: Get the latest release version
id: version
run: |
release_version=$(cat lerna.json | jq -r '.version')
echo $release_version
echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
- name: Tag and release Budibase service docker image
uses: docker/build-push-action@v2
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
build-args: BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }}
tags: budibase/budibase,budibase/budibase:${{ env.RELEASE_VERSION }}
file: ./hosting/single/Dockerfile.v2
env:
BUDIBASE_VERSION: ${{ env.RELEASE_VERSION }}
- name: Tag and release Budibase Azure App Service docker image
uses: docker/build-push-action@v2
with:
context: .
push: true
platforms: linux/amd64
build-args: |
TARGETBUILD=aas
BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }}
tags: budibase/budibase-aas,budibase/budibase-aas:${{ env.RELEASE_VERSION }}
file: ./hosting/single/Dockerfile.v2
env:
BUDIBASE_VERSION: ${{ env.RELEASE_VERSION }}


@@ -10,13 +10,14 @@ jobs:
     steps:
       - uses: actions/stale@v8
         with:
+          days-before-stale: 330
           operations-per-run: 1
           # stale rules for PRs
           days-before-pr-stale: 7
           stale-issue-label: stale
           exempt-pr-labels: pinned,security,roadmap
           days-before-pr-close: 7
+          days-before-issue-close: 30
       - uses: actions/stale@v8
         with:
@@ -25,6 +26,7 @@
           days-before-stale: 30
           only-issue-labels: bug,High priority
           stale-issue-label: warn
+          days-before-close: 30
       - uses: actions/stale@v8
         with:
@@ -33,6 +35,7 @@
           days-before-stale: 90
           only-issue-labels: bug,Medium priority
           stale-issue-label: warn
+          days-before-close: 30
       - uses: actions/stale@v8
         with:
@@ -42,5 +45,4 @@
           stale-issue-label: stale
           only-issue-labels: bug
           stale-issue-message: "This issue has been automatically marked as stale because it has not had any activity for six months."
           days-before-close: 30

.gitignore

@@ -1,4 +1,3 @@
-builder/*
 .data/
 .temp/
 packages/server/runtime_apps/
@@ -41,8 +40,11 @@ bower_components
 build/Release
 # Dependency directories
-/node_modules/
 jspm_packages/
+*.min.js
+*.map
+node_modules/
+dist/
 # TypeScript v1 declaration files
 typings/

.gitmodules

@@ -1,3 +1,6 @@
 [submodule "packages/pro"]
 	path = packages/pro
 	url = git@github.com:Budibase/budibase-pro.git
+[submodule "packages/account-portal"]
+	path = packages/account-portal
+	url = git@github.com:Budibase/account-portal.git

.nvmrc

@@ -1 +1 @@
-v18.17.0
+v20.10.0


@@ -1,13 +1,14 @@
 node_modules
 dist
-*.spec.js
-packages/builder/src/components/design/AppPreview/CurrentItemPreview.svelte
 packages/server/builder
 packages/server/coverage
-packages/worker/coverage
-packages/backend-core/coverage
 packages/server/client
 packages/server/src/definitions/openapi.ts
+packages/worker/coverage
+packages/backend-core/coverage
 packages/builder/.routify
 packages/sdk/sdk
 packages/pro/coverage
+packages/account-portal/packages/ui/build
+packages/account-portal/packages/ui/.routify
+packages/account-portal/packages/server/build


@@ -1,3 +1,3 @@
-nodejs 18.17.0
+nodejs 20.10.0
 python 3.10.0
 yarn 1.22.19


@@ -1,7 +1,7 @@
 {
   "editor.formatOnSave": true,
   "editor.codeActionsOnSave": {
-    "source.fixAll": true
+    "source.fixAll": "explicit"
   },
   "editor.defaultFormatter": "esbenp.prettier-vscode",
   "[json]": {

CODEOWNERS (new file)

@@ -0,0 +1,3 @@
/packages/server @Budibase/backend
/packages/worker @Budibase/backend
/packages/backend-core @Budibase/backend


@@ -1,7 +1,9 @@
-Copyright 2019-2021, Budibase Ltd.
+Copyright 2019-2023, Budibase Ltd.
 Each Budibase package has its own license, please check the license file in each package.
 You can consider Budibase to be GPLv3 licensed overall.
 The apps that you build with Budibase do not package any GPLv3 licensed code, thus do not fall under those restrictions.
+Budibase ships with Structured Query Server, by The Neighbourhoodie Software GmbH. The license for this can be found at ./SQS_LICENSE

SQS_LICENSE (new file)

@@ -0,0 +1,31 @@
FORM OF CUSTOMER LICENCE
Budibase hereby grants the Customer a worldwide, royalty free, non-exclusive,
perpetual (for the lifetime of the intellectual property rights contained in the Product)
right and title to utilise the binary code of the The Neighbourhoodie Software GmbH
Structured Query Server software product (Product) for its own internal business
purposes (the Purpose) only (the Licence). The Product has the function of bringing a
CouchDB database (NoSQL database) into an SQL database form (SQLite) and thereby
making it usable for complex queries - which originally could only be displayed in an
SQL database. By indexing in SQLite and a server that is tailored to it, the Product
enables the use of CouchDB with SQL queries.
The Licence shall not permit sub-licensing, resale or transfer of the Product to third
parties, other than sub-licensing to the Customers direct contractors for the purposes
of utilizing the Product as contemplated above.
The Licence shall not permit the adaptation, modification, decompilation, reverse
engineering or similar activities with respect to the Product.
This licence is granted to the Customer only, although Customer and its Affiliates
employees, servants and agents shall be entitled to utilize the Product within the scope
of the Licence for the Customers Purpose only.
Reproduction is not permitted to users, except for reproductions that are necessary for
the use of the product under the licence described above. These conditions apply to the
product regardless of the form in which we make the product available and on which
devices it is installed and/or with which devices it is ultimately used. Depending on the
product variant or intended use, certain technical requirements in the IT infrastructure
must be satisfied as a prerequisite for use.
The law of the Northern Ireland applies exclusively to this licence, and the courts of
Northern Ireland shall have exclusive jurisdiction, save that we reserve a right to sue
you in the jurisdiction in which you are based. The application of the UN Sales
Convention (CISG) is excluded.
The invalidity of any part of this licence does not affect the validity of the remaining
regulations.


@@ -1,9 +1,6 @@
 dependencies:
 - name: couchdb
   repository: https://apache.github.io/couchdb-helm
-  version: 3.3.4
-- name: ingress-nginx
-  repository: https://kubernetes.github.io/ingress-nginx
-  version: 4.0.13
-digest: sha256:20892705c2d8e64c98257d181063a514ac55013e2b43399a6e54868a97f97845
-generated: "2021-12-30T18:55:30.878411Z"
+  version: 4.3.0
+digest: sha256:94449a7f195b186f5af33ec5aa66d58b36bede240fae710f021ca87837b30606
+generated: "2023-11-20T17:43:02.777596Z"


@@ -17,10 +17,6 @@ version: 0.0.0
 appVersion: 0.0.0
 dependencies:
   - name: couchdb
-    version: 3.3.4
+    version: 4.3.0
     repository: https://apache.github.io/couchdb-helm
     condition: services.couchdb.enabled
-  - name: ingress-nginx
-    version: 4.0.13
-    repository: https://kubernetes.github.io/ingress-nginx
-    condition: ingress.nginx


@@ -1,39 +1,228 @@
-# Budibase
+# budibase
-[Budibase](https://budibase.com/) Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes.
+Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes.
-## TL;DR;
-```console
-$ cd chart
-$ helm install budibase .
-```
-## Introduction
-This chart bootstraps a [Budibase](https://budibase.com/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
 ## Prerequisites
-- helm v3 or above
+- `helm` v3 or above
 - Kubernetes 1.4+
-- PV provisioner support in the underlying infrastructure (with persistence storage enabled)
+- A storage controller (if you want to use persistent storage)
+- An ingress controller (if you want to define an `Ingress` resource)
+- `metrics-server` (if you want to make use of horizontal pod autoscaling)
-## Installing the Chart
+## Chart dependencies
-To install the chart with the release name `budi-release`:
+This chart depends on the official Apache CouchDB chart. You can see its
+documentation here:
+<https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>.
+## Upgrading
+### `2.x` to `3.0.0`
+We made a number of breaking changes in this release to make the chart more
+idiomatic and easier to use.
+1. We no longer bundle `ingress-nginx`. If you were relying on this to supply
+   an ingress controller to your cluster, you will now need to deploy that
+   separately. You'll find guidance for that here:
+   <https://kubernetes.github.io/ingress-nginx/>.
+2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm)
+   we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align
+   the CouchDB chart used with the CouchDB version used, which has also updated
+   from 3.1.1 to 3.2.1. Additionally, we're moving away from the official CouchDB
+   to one we're building ourselves.
+3. We've separated out the supplied AWS ALB ingress resource for those deploying
+   into EKS. Where previously you enabled this by setting `ingress.enabled: false`
+   and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all
+   configuration for it is under `awsAlbIngress`.
+4. The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has
+   been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`.
+   They are configured at `services.{apps,worker,proxy}.autoscaling`.
+## Installing
+To install the chart from our repository:
 ```console
-$ helm install budi-release .
+$ helm repo add budibase https://budibase.github.io/budibase/
+$ helm repo update
+$ helm install --create-namespace --namespace budibase budibase budibase/budibase
 ```
-The command deploys Budibase on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+To install the chart from this repo:
-> **Tip**: List all releases using `helm list`
-## Uninstalling the Chart
-To uninstall/delete the `my-release` deployment:
 ```console
-$ helm delete my-release
+$ git clone git@github.com:budibase/budibase.git
+$ cd budibase/charts/budibase
+$ helm install --create-namespace --namespace budibase budibase .
 ```
## Example minimal configuration
Here's an example `values.yaml` that would get a Budibase instance running in a home
cluster using an nginx ingress controller and NFS as cluster storage (basically one of our
staff's homelabs).
<details>
```yaml
ingress:
enabled: true
className: "nginx"
hosts:
- host: budibase.local # set this to whatever DNS name you'd use
paths:
- backend:
service:
name: proxy-service
port:
number: 10000
path: /
pathType: Prefix
couchdb:
persistentVolume:
enabled: true
storageClass: "nfs-client"
adminPassword: admin
services:
objectStore:
storageClass: "nfs-client"
redis:
storageClass: "nfs-client"
```
If you wanted to use this when bringing up Budibase in your own cluster, you could save it
to your hard disk and run the following:
```console
$ helm install --create-namespace --namespace budibase budibase . -f values.yaml
```
</details>
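For anyone coming from chart `2.x`, the relocated settings described in the upgrade notes above map onto `values.yaml` roughly as follows. This is a sketch assembled from the keys in the configuration table below, not a complete or tested configuration:

```yaml
# Sketch only: replaces the old `ingress.aws: true` and `hpa.enabled: true` settings.
awsAlbIngress:
  enabled: true
  certificateArn: "" # ACM certificate ARN, if you want HTTPS

services:
  apps:
    autoscaling:
      enabled: true
      minReplicas: 1
      maxReplicas: 10
      targetCPUUtilizationPercentage: 80
  worker:
    autoscaling:
      enabled: true
  proxy:
    autoscaling:
      enabled: true
```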
## Configuring
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Sets the affinity for all pods created by this chart. Should not ordinarily need to be changed. See <https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/> for more information on affinity. |
| awsAlbIngress.certificateArn | string | `""` | If you're wanting to use HTTPS, you'll need to create an ACM certificate and specify the ARN here. |
| awsAlbIngress.enabled | bool | `false` | Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller. |
| couchdb.clusterSize | int | `1` | The number of replicas to run in the CouchDB cluster. We set this to 1 by default to make things simpler, but you can set it to 3 if you need a high-availability CouchDB cluster. |
| couchdb.couchdbConfig.couchdb.uuid | string | `"budibase-couchdb"` | Unique identifier for this CouchDB server instance. You shouldn't need to change this. |
| couchdb.image | object | `{}` | We use a custom CouchDB image for running Budibase and we don't support using any other CouchDB image. You shouldn't change this, and if you do we can't guarantee that Budibase will work. |
| globals.apiEncryptionKey | string | `""` | Used for encrypting API keys and environment variables when stored in the database. You don't need to set this if `createSecrets` is true. |
| globals.appVersion | string | `""` | The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}. Ends up being used as the image version tag for the apps, proxy, and worker images. |
| globals.automationMaxIterations | string | `"200"` | The maximum number of iterations allowed for an automation loop step. You can read more about looping here: <https://docs.budibase.com/docs/looping>. |
| globals.budibaseEnv | string | `"PRODUCTION"` | Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not ordinarily need to be changed. |
| globals.cookieDomain | string | `""` | Sets the domain attribute of the cookie that Budibase uses to store session information. See <https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies#define_where_cookies_are_sent> for details on why you might want to set this. |
| globals.createSecrets | bool | `true` | Create an internal API key, JWT secret, object store access key and secret, and store them in a Kubernetes `Secret`. |
| globals.enableAnalytics | string | `"1"` | Whether to enable analytics or not. You can read more about our analytics here: <https://docs.budibase.com/docs/analytics>. |
| globals.google | object | `{"clientId":"","secret":""}` | Google OAuth settings. These can also be set in the Budibase UI, see <https://docs.budibase.com/docs/sso-with-google> for details. |
| globals.google.clientId | string | `""` | Client ID of your Google OAuth app. |
| globals.google.secret | string | `""` | Client secret of your Google OAuth app. |
| globals.httpMigrations | string | `"0"` | Whether or not to enable doing data migrations over the HTTP API. If this is set to "0", migrations are run on startup. You shouldn't ordinarily need to change this. |
| globals.internalApiKey | string | `""` | API key used for internal Budibase API calls. You don't need to set this if `createSecrets` is true. |
| globals.internalApiKeyFallback | string | `""` | A fallback value for `internalApiKey`. If you're rotating your encryption key, you can set this to the old value for the duration of the rotation. |
| globals.jwtSecret | string | `""` | Secret used for signing JWTs. You don't need to set this if `createSecrets` is true. |
| globals.jwtSecretFallback | string | `""` | A fallback value for `jwtSecret`. If you're rotating your JWT secret, you can set this to the old value for the duration of the rotation. |
| globals.platformUrl | string | `""` | Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are self-hosting. |
| globals.smtp.enabled | bool | `false` | Whether to enable SMTP or not. |
| globals.smtp.from | string | `""` | The email address to use in the "From:" field of emails sent by Budibase. |
| globals.smtp.host | string | `""` | The hostname of your SMTP server. |
| globals.smtp.password | string | `""` | The password to use when authenticating with your SMTP server. |
| globals.smtp.port | string | `"587"` | The port of your SMTP server. |
| globals.smtp.user | string | `""` | The username to use when authenticating with your SMTP server. |
| globals.tenantFeatureFlags | string | `"*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"` | Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be changed. |
| imagePullSecrets | list | `[]` | Passed to all pods created by this chart. Should not ordinarily need to be changed. |
| ingress.className | string | `""` | What ingress class to use. |
| ingress.enabled | bool | `true` | Whether to create an Ingress resource pointing to the Budibase proxy. |
| ingress.hosts | list | `[]` | Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy. |
| nameOverride | string | `""` | Override the name of the deployment. Defaults to {{ .Chart.Name }}. |
| service.port | int | `10000` | Port to expose on the service. |
| service.type | string | `"ClusterIP"` | Service type for the service that points to the main Budibase proxy pod. |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| services.apps.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the apps service. |
| services.apps.autoscaling.maxReplicas | int | `10` | |
| services.apps.autoscaling.minReplicas | int | `1` | |
| services.apps.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the apps service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the apps pods. |
| services.apps.httpLogging | int | `1` | Whether or not to log HTTP requests to the apps service. |
| services.apps.livenessProbe | object | HTTP health checks. | Liveness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.apps.logLevel | string | `"info"` | The log level for the apps service. |
| services.apps.readinessProbe | object | HTTP health checks. | Readiness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.apps.replicaCount | int | `1` | The number of apps replicas to run. |
| services.apps.resources | object | `{}` | The resources to use for apps pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.apps.startupProbe | object | HTTP health checks. | Startup probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.automationWorkers.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the automation worker service. |
| services.automationWorkers.autoscaling.maxReplicas | int | `10` | |
| services.automationWorkers.autoscaling.minReplicas | int | `1` | |
| services.automationWorkers.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the automation worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the automation worker pods. |
| services.automationWorkers.enabled | bool | `true` | Whether or not to enable the automation worker service. If you disable this, automations will be processed by the apps service. |
| services.automationWorkers.livenessProbe | object | HTTP health checks. | Liveness probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.automationWorkers.logLevel | string | `"info"` | The log level for the automation worker service. |
| services.automationWorkers.readinessProbe | object | HTTP health checks. | Readiness probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.automationWorkers.replicaCount | int | `1` | The number of automation worker replicas to run. |
| services.automationWorkers.resources | object | `{}` | The resources to use for automation worker pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.automationWorkers.startupProbe | object | HTTP health checks. | Startup probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.couchdb.backup.enabled | bool | `false` | Whether or not to enable periodic CouchDB backups. This works by replicating to another CouchDB instance. |
| services.couchdb.backup.interval | string | `""` | Backup interval in seconds |
| services.couchdb.backup.resources | object | `{}` | The resources to use for CouchDB backup pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.couchdb.backup.target | string | `""` | Target CouchDB instance to back up to, either a hostname or an IP address. |
| services.couchdb.enabled | bool | `true` | Whether or not to spin up a CouchDB instance in your cluster. True by default, and the configuration for the CouchDB instance is under the `couchdb` key at the root of this file. You can see what options are available to you by looking at the official CouchDB Helm chart: <https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>. |
| services.couchdb.port | int | `5984` | |
| services.dns | string | `"cluster.local"` | The DNS suffix to use for service discovery. You only need to change this if you've configured your cluster to use a different DNS suffix. |
| services.objectStore.accessKey | string | `""` | AWS_ACCESS_KEY if using S3 |
| services.objectStore.browser | bool | `true` | Whether to enable the Minio web console or not. If you're exposing Minio to the Internet (via a custom Ingress record, for example), you should set this to false. If you're only exposing Minio to your cluster, you can leave this as true. |
| services.objectStore.cloudfront.cdn | string | `""` | Set the URL of a CloudFront distribution to enable CloudFront. |
| services.objectStore.cloudfront.privateKey64 | string | `""` | Base64-encoded private key for the CloudFront public key referenced by `publicKeyId`. |
| services.objectStore.cloudfront.publicKeyId | string | `""` | ID of the public key stored in CloudFront. |
| services.objectStore.minio | bool | `true` | Set to false if using another object store, such as S3. You will need to set `services.objectStore.url` to point to your bucket if you do this. |
| services.objectStore.region | string | `""` | AWS_REGION if using S3 |
| services.objectStore.resources | object | `{}` | The resources to use for Minio pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.objectStore.secretKey | string | `""` | AWS_SECRET_ACCESS_KEY if using S3 |
| services.objectStore.storage | string | `"100Mi"` | How much storage to give Minio in its PersistentVolumeClaim. |
| services.objectStore.storageClass | string | `""` | If defined, `storageClassName: <storageClass>`. If set to `"-"`, `storageClassName: ""`, which disables dynamic provisioning. If undefined (the default) or set to null, no `storageClassName` spec is set, and the default provisioner is chosen. |
| services.objectStore.url | string | `"http://minio-service:9000"` | URL to use for object storage. Only change this if you're using an external object store, such as S3. Remember to set `minio: false` if you do this; see the example after this table. |
| services.proxy.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the proxy service. |
| services.proxy.autoscaling.maxReplicas | int | `10` | |
| services.proxy.autoscaling.minReplicas | int | `1` | |
| services.proxy.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the proxy service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the proxy pods. |
| services.proxy.livenessProbe | object | HTTP health checks. | Liveness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.proxy.readinessProbe | object | HTTP health checks. | Readiness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.proxy.replicaCount | int | `1` | The number of proxy replicas to run. |
| services.proxy.resources | object | `{}` | The resources to use for proxy pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.proxy.startupProbe | object | HTTP health checks. | Startup probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.redis.enabled | bool | `true` | Whether or not to deploy a Redis pod into your cluster. |
| services.redis.password | string | `"budibase"` | The password to use when connecting to Redis. It's recommended that you change this from the default if you're running Redis in-cluster. |
| services.redis.port | int | `6379` | Port to expose Redis on. |
| services.redis.resources | object | `{}` | The resources to use for Redis pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.redis.storage | string | `"100Mi"` | How much persistent storage to allocate to Redis. |
| services.redis.storageClass | string | `""` | If defined, `storageClassName: <storageClass>`. If set to `"-"`, `storageClassName: ""`, which disables dynamic provisioning. If undefined (the default) or set to null, no `storageClassName` spec is set, and the default provisioner is chosen. |
| services.redis.url | string | `""` | If you choose to run Redis externally to this chart, you can specify the connection details here. |
| services.worker.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the worker service. |
| services.worker.autoscaling.maxReplicas | int | `10` | |
| services.worker.autoscaling.minReplicas | int | `1` | |
| services.worker.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the worker pods. |
| services.worker.httpLogging | int | `1` | Whether or not to log HTTP requests to the worker service. |
| services.worker.livenessProbe | object | HTTP health checks. | Liveness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.worker.logLevel | string | `"info"` | The log level for the worker service. |
| services.worker.readinessProbe | object | HTTP health checks. | Readiness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.worker.replicaCount | int | `1` | The number of worker replicas to run. |
| services.worker.resources | object | `{}` | The resources to use for worker pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
| services.worker.startupProbe | object | HTTP health checks. | Startup probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| tolerations | list | `[]` | Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed. See <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/> for more information on tolerations. |
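As a rough illustration of combining a few of the values above (every hostname, credential, and endpoint below is a placeholder, not a chart default), the following overrides file enables SMTP and points Budibase at an external S3-compatible object store:
```yaml
# Illustrative overrides only -- substitute your own endpoints and credentials.
globals:
  smtp:
    enabled: true
    host: "smtp.example.com"        # placeholder SMTP server
    port: "587"
    from: "budibase@example.com"
    user: "smtp-user"
    password: "changeme"
services:
  objectStore:
    minio: false                    # disable the in-cluster Minio
    url: "https://s3.eu-west-1.amazonaws.com"   # placeholder bucket endpoint
    region: "eu-west-1"
    accessKey: "your-access-key"
    secretKey: "your-secret-key"
```
Pass this to `helm install` or `helm upgrade` with `-f overrides.yaml`, as shown in the installation section.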
## Uninstalling
To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so):
```console
$ helm uninstall --namespace budibase budibase
```
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3)

View File

@ -0,0 +1,117 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
## Prerequisites
- `helm` v3 or above
- Kubernetes 1.4+
- A storage controller (if you want to use persistent storage)
- An ingress controller (if you want to define an `Ingress` resource)
- `metrics-server` (if you want to make use of horizontal pod autoscaling)
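If you want a quick sanity check of these prerequisites (assuming `kubectl` is pointed at the target cluster), something like the following works:
```console
$ helm version --short       # should report a v3.x client
$ kubectl version            # confirms the cluster is reachable and its version
$ kubectl get storageclass   # at least one StorageClass if you want persistent storage
$ kubectl top nodes          # only succeeds when metrics-server is installed
```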
## Chart dependencies
This chart depends on the official Apache CouchDB chart. You can see its
documentation here:
<https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>.
## Upgrading
### `2.x` to `3.0.0`
We made a number of breaking changes in this release to make the chart more
idiomatic and easier to use.
1. We no longer bundle `ingress-nginx`. If you were relying on this to supply
an ingress controller to your cluster, you will now need to deploy that
separately. You'll find guidance for that here:
<https://kubernetes.github.io/ingress-nginx/>.
2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm)
we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align the
version of the CouchDB chart with the version of CouchDB itself, which has also
been updated from 3.1.1 to 3.2.1. Additionally, we're moving away from the official
CouchDB image to one we build ourselves.
3. We've separated out the supplied AWS ALB ingress resource for those deploying
into EKS. Where previously you enabled this by setting `ingress.enabled: false`
and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all
configuration for it is under `awsAlbIngress`.
4. The `HorizontalPodAutoscaler` that was configured at `autoscaling.enabled: true` has
been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`.
They are configured at `services.{apps,worker,proxy}.autoscaling`; a migration
sketch follows this list.
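As a minimal sketch (key names are taken from this chart's values; the certificate ARN below is a placeholder, and replica counts and targets should be adjusted to your own needs), here is how a `2.x` values file that used the ALB ingress and the chart-level autoscaler might be migrated:
```yaml
# 2.x keys that no longer exist in 3.0.0:
#
# ingress:
#   enabled: false
#   aws: true
#   certificateArn: "arn:aws:acm:eu-west-1:111111111111:certificate/placeholder"
# autoscaling:
#   enabled: true
#   minReplicas: 1
#   maxReplicas: 100
#   targetCPUUtilizationPercentage: 80

# 3.0.0 equivalents:
awsAlbIngress:
  enabled: true
  certificateArn: "arn:aws:acm:eu-west-1:111111111111:certificate/placeholder"
services:
  apps:
    autoscaling:
      enabled: true
      minReplicas: 1
      maxReplicas: 10
      targetCPUUtilizationPercentage: 80
  worker:
    autoscaling:
      enabled: true
  proxy:
    autoscaling:
      enabled: true
```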
## Installing
To install the chart from our repository:
```console
$ helm repo add budibase https://budibase.github.io/budibase/
$ helm repo update
$ helm install --create-namespace --namespace budibase budibase budibase/budibase
```
To install the chart from this repo:
```console
$ git clone git@github.com:budibase/budibase.git
$ cd budibase/charts/budibase
$ helm install --create-namespace --namespace budibase budibase .
```
## Example minimal configuration
Here's an example `values.yaml` that would get a Budibase instance running in a home
cluster using an nginx ingress controller and NFS as cluster storage (basically one of our
staff's homelabs).
<details>
```yaml
ingress:
enabled: true
className: "nginx"
hosts:
- host: budibase.local # set this to whatever DNS name you'd use
paths:
- backend:
service:
name: proxy-service
port:
number: 10000
path: /
pathType: Prefix
couchdb:
persistentVolume:
enabled: true
storageClass: "nfs-client"
adminPassword: admin
services:
objectStore:
storageClass: "nfs-client"
redis:
storageClass: "nfs-client"
```
If you wanted to use this when bringing up Budibase in your own cluster, you could save it
to your hard disk and run the following:
```console
$ helm install --create-namespace --namespace budibase budibase . -f values.yaml
```
</details>
## Configuring
{{ template "chart.valuesTable" . }}
## Uninstalling
To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so):
```console
$ helm uninstall --namespace budibase budibase
```
{{ template "helm-docs.versionFooter" . }}

Binary file not shown.

View File

@ -1,4 +1,4 @@
{{- if .Values.ingress.aws }} {{- if .Values.awsAlbIngress.enabled }}
apiVersion: networking.k8s.io/v1 apiVersion: networking.k8s.io/v1
kind: Ingress kind: Ingress
metadata: metadata:
@ -7,24 +7,24 @@ metadata:
kubernetes.io/ingress.class: alb kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/scheme: internet-facing alb.ingress.kubernetes.io/scheme: internet-facing
alb.ingress.kubernetes.io/target-type: ip alb.ingress.kubernetes.io/target-type: ip
alb.ingress.kubernetes.io/success-codes: 200,301 alb.ingress.kubernetes.io/success-codes: '200'
alb.ingress.kubernetes.io/healthcheck-path: / alb.ingress.kubernetes.io/healthcheck-path: '/health'
{{- if .Values.ingress.certificateArn }} {{- if .Values.awsAlbIngress.certificateArn }}
alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.certificateArn }} alb.ingress.kubernetes.io/certificate-arn: {{ .Values.awsAlbIngress.certificateArn }}
{{- end }} {{- end }}
{{- if .Values.ingress.sslPolicy }} {{- if .Values.awsAlbIngress.sslPolicy }}
alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.ingress.sslPolicy }} alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.awsAlbIngress.sslPolicy }}
{{- end }} {{- end }}
{{- if .Values.ingress.securityGroups }} {{- if .Values.awsAlbIngress.securityGroups }}
alb.ingress.kubernetes.io/security-groups: {{ .Values.ingress.securityGroups }} alb.ingress.kubernetes.io/security-groups: {{ .Values.awsAlbIngress.securityGroups }}
{{- end }} {{- end }}
spec: spec:
rules: rules:
- http: - http:
paths: paths:
{{- if .Values.ingress.certificateArn }} {{- if .Values.awsAlbIngress.certificateArn }}
- path: / - path: /
pathType: Prefix pathType: Prefix
backend: backend:

View File

@ -2,12 +2,9 @@ apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations: annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
{{ if .Values.services.apps.deploymentAnnotations }} {{ if .Values.services.apps.deploymentAnnotations }}
{{- toYaml .Values.services.apps.deploymentAnnotations | indent 4 -}} {{- toYaml .Values.services.apps.deploymentAnnotations | indent 4 -}}
{{ end }} {{ end }}
creationTimestamp: null
labels: labels:
io.kompose.service: app-service io.kompose.service: app-service
{{ if .Values.services.apps.deploymentLabels }} {{ if .Values.services.apps.deploymentLabels }}
@ -24,12 +21,9 @@ spec:
template: template:
metadata: metadata:
annotations: annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
{{ if .Values.services.apps.templateAnnotations }} {{ if .Values.services.apps.templateAnnotations }}
{{- toYaml .Values.services.apps.templateAnnotations | indent 8 -}} {{- toYaml .Values.services.apps.templateAnnotations | indent 8 -}}
{{ end }} {{ end }}
creationTimestamp: null
labels: labels:
io.kompose.service: app-service io.kompose.service: app-service
{{ if .Values.services.apps.templateLabels }} {{ if .Values.services.apps.templateLabels }}
@ -198,7 +192,14 @@ spec:
- name: NODE_TLS_REJECT_UNAUTHORIZED - name: NODE_TLS_REJECT_UNAUTHORIZED
value: {{ .Values.services.tlsRejectUnauthorized }} value: {{ .Values.services.tlsRejectUnauthorized }}
{{ end }} {{ end }}
{{- if .Values.services.automationWorkers.enabled }}
- name: APP_FEATURES
value: "api"
{{- end }}
{{- range .Values.services.apps.extraEnv }}
- name: {{ .name }}
value: {{ .value | quote }}
{{- end }}
image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }} image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }}
imagePullPolicy: Always imagePullPolicy: Always
{{- if .Values.services.apps.startupProbe }} {{- if .Values.services.apps.startupProbe }}
@ -226,6 +227,14 @@ spec:
resources: resources:
{{- toYaml . | nindent 10 }} {{- toYaml . | nindent 10 }}
{{ end }} {{ end }}
{{ if .Values.services.apps.command }}
command:
{{- toYaml .Values.services.apps.command | nindent 10 }}
{{ end }}
{{ if .Values.services.apps.args }}
args:
{{- toYaml .Values.services.apps.args | nindent 10 }}
{{ end }}
{{- with .Values.affinity }} {{- with .Values.affinity }}
affinity: affinity:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
@ -243,4 +252,10 @@ spec:
{{ end }} {{ end }}
restartPolicy: Always restartPolicy: Always
serviceAccountName: "" serviceAccountName: ""
{{ if .Values.services.apps.ndots }}
dnsConfig:
options:
- name: ndots
value: {{ .Values.services.apps.ndots | quote }}
{{ end }}
status: {} status: {}

View File

@ -0,0 +1,32 @@
{{- if .Values.services.apps.autoscaling.enabled }}
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "budibase.fullname" . }}-apps
labels:
{{- include "budibase.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: app-service
minReplicas: {{ .Values.services.apps.autoscaling.minReplicas }}
maxReplicas: {{ .Values.services.apps.autoscaling.maxReplicas }}
metrics:
{{- if .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -1,10 +1,6 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: app-service io.kompose.service: app-service
name: app-service name: app-service

View File

@ -0,0 +1,262 @@
{{- if .Values.services.automationWorkers.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
{{ if .Values.services.automationWorkers.deploymentAnnotations }}
{{- toYaml .Values.services.automationWorkers.deploymentAnnotations | indent 4 -}}
{{ end }}
labels:
io.kompose.service: automation-worker-service
{{ if .Values.services.automationWorkers.deploymentLabels }}
{{- toYaml .Values.services.automationWorkers.deploymentLabels | indent 4 -}}
{{ end }}
name: automation-worker-service
spec:
replicas: {{ .Values.services.automationWorkers.replicaCount }}
selector:
matchLabels:
io.kompose.service: automation-worker-service
strategy:
type: RollingUpdate
template:
metadata:
annotations:
{{ if .Values.services.automationWorkers.templateAnnotations }}
{{- toYaml .Values.services.automationWorkers.templateAnnotations | indent 8 -}}
{{ end }}
labels:
io.kompose.service: automation-worker-service
{{ if .Values.services.automationWorkers.templateLabels }}
{{- toYaml .Values.services.automationWorkers.templateLabels | indent 8 -}}
{{ end }}
spec:
containers:
- env:
- name: BUDIBASE_ENVIRONMENT
value: {{ .Values.globals.budibaseEnv }}
- name: DEPLOYMENT_ENVIRONMENT
value: "kubernetes"
- name: COUCH_DB_URL
{{ if .Values.services.couchdb.url }}
value: {{ .Values.services.couchdb.url }}
{{ else }}
value: http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}
{{ end }}
{{ if .Values.services.couchdb.enabled }}
- name: COUCH_DB_USER
valueFrom:
secretKeyRef:
name: {{ template "couchdb.fullname" . }}
key: adminUsername
- name: COUCH_DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "couchdb.fullname" . }}
key: adminPassword
{{ end }}
- name: ENABLE_ANALYTICS
value: {{ .Values.globals.enableAnalytics | quote }}
- name: API_ENCRYPTION_KEY
value: {{ .Values.globals.apiEncryptionKey | quote }}
- name: HTTP_LOGGING
value: {{ .Values.services.automationWorkers.httpLogging | quote }}
- name: INTERNAL_API_KEY
valueFrom:
secretKeyRef:
name: {{ template "budibase.fullname" . }}
key: internalApiKey
- name: INTERNAL_API_KEY_FALLBACK
value: {{ .Values.globals.internalApiKeyFallback | quote }}
- name: JWT_SECRET
valueFrom:
secretKeyRef:
name: {{ template "budibase.fullname" . }}
key: jwtSecret
- name: JWT_SECRET_FALLBACK
value: {{ .Values.globals.jwtSecretFallback | quote }}
{{ if .Values.services.objectStore.region }}
- name: AWS_REGION
value: {{ .Values.services.objectStore.region }}
{{ end }}
- name: MINIO_ENABLED
value: {{ .Values.services.objectStore.minio | quote }}
- name: MINIO_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ template "budibase.fullname" . }}
key: objectStoreAccess
- name: MINIO_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ template "budibase.fullname" . }}
key: objectStoreSecret
- name: CLOUDFRONT_CDN
value: {{ .Values.services.objectStore.cloudfront.cdn | quote }}
- name: CLOUDFRONT_PUBLIC_KEY_ID
value: {{ .Values.services.objectStore.cloudfront.publicKeyId | quote }}
- name: CLOUDFRONT_PRIVATE_KEY_64
value: {{ .Values.services.objectStore.cloudfront.privateKey64 | quote }}
- name: MINIO_URL
value: {{ .Values.services.objectStore.url }}
- name: PLUGIN_BUCKET_NAME
value: {{ .Values.services.objectStore.pluginBucketName | quote }}
- name: APPS_BUCKET_NAME
value: {{ .Values.services.objectStore.appsBucketName | quote }}
- name: GLOBAL_BUCKET_NAME
value: {{ .Values.services.objectStore.globalBucketName | quote }}
- name: BACKUPS_BUCKET_NAME
value: {{ .Values.services.objectStore.backupsBucketName | quote }}
- name: PORT
value: {{ .Values.services.automationWorkers.port | quote }}
{{ if .Values.services.worker.publicApiRateLimitPerSecond }}
- name: API_REQ_LIMIT_PER_SEC
value: {{ .Values.globals.automationWorkers.publicApiRateLimitPerSecond | quote }}
{{ end }}
- name: MULTI_TENANCY
value: {{ .Values.globals.multiTenancy | quote }}
- name: OFFLINE_MODE
value: {{ .Values.globals.offlineMode | quote }}
- name: LOG_LEVEL
value: {{ .Values.services.automationWorkers.logLevel | quote }}
- name: REDIS_PASSWORD
value: {{ .Values.services.redis.password }}
- name: REDIS_URL
{{ if .Values.services.redis.url }}
value: {{ .Values.services.redis.url }}
{{ else }}
value: redis-service:{{ .Values.services.redis.port }}
{{ end }}
- name: SELF_HOSTED
value: {{ .Values.globals.selfHosted | quote }}
- name: POSTHOG_TOKEN
value: {{ .Values.globals.posthogToken | quote }}
- name: WORKER_URL
value: http://worker-service:{{ .Values.services.worker.port }}
- name: PLATFORM_URL
value: {{ .Values.globals.platformUrl | quote }}
- name: ACCOUNT_PORTAL_URL
value: {{ .Values.globals.accountPortalUrl | quote }}
- name: ACCOUNT_PORTAL_API_KEY
value: {{ .Values.globals.accountPortalApiKey | quote }}
- name: COOKIE_DOMAIN
value: {{ .Values.globals.cookieDomain | quote }}
- name: HTTP_MIGRATIONS
value: {{ .Values.globals.httpMigrations | quote }}
- name: GOOGLE_CLIENT_ID
value: {{ .Values.globals.google.clientId | quote }}
- name: GOOGLE_CLIENT_SECRET
value: {{ .Values.globals.google.secret | quote }}
- name: AUTOMATION_MAX_ITERATIONS
value: {{ .Values.globals.automationMaxIterations | quote }}
- name: TENANT_FEATURE_FLAGS
value: {{ .Values.globals.tenantFeatureFlags | quote }}
- name: ENCRYPTION_KEY
value: {{ .Values.globals.bbEncryptionKey | quote }}
{{ if .Values.globals.bbAdminUserEmail }}
- name: BB_ADMIN_USER_EMAIL
value: {{ .Values.globals.bbAdminUserEmail | quote }}
{{ end }}
{{ if .Values.globals.bbAdminUserPassword }}
- name: BB_ADMIN_USER_PASSWORD
value: {{ .Values.globals.bbAdminUserPassword | quote }}
{{ end }}
{{ if .Values.globals.pluginsDir }}
- name: PLUGINS_DIR
value: {{ .Values.globals.pluginsDir | quote }}
{{ end }}
{{ if .Values.services.automationWorkers.nodeDebug }}
- name: NODE_DEBUG
value: {{ .Values.services.automationWorkers.nodeDebug | quote }}
{{ end }}
{{ if .Values.globals.datadogApmEnabled }}
- name: DD_LOGS_INJECTION
value: {{ .Values.globals.datadogApmEnabled | quote }}
- name: DD_APM_ENABLED
value: {{ .Values.globals.datadogApmEnabled | quote }}
- name: DD_APM_DD_URL
value: https://trace.agent.datadoghq.eu
{{ end }}
{{ if .Values.globals.globalAgentHttpProxy }}
- name: GLOBAL_AGENT_HTTP_PROXY
value: {{ .Values.globals.globalAgentHttpProxy | quote }}
{{ end }}
{{ if .Values.globals.globalAgentHttpsProxy }}
- name: GLOBAL_AGENT_HTTPS_PROXY
value: {{ .Values.globals.globalAgentHttpsProxy | quote }}
{{ end }}
{{ if .Values.globals.globalAgentNoProxy }}
- name: GLOBAL_AGENT_NO_PROXY
value: {{ .Values.globals.globalAgentNoProxy | quote }}
{{ end }}
{{ if .Values.services.tlsRejectUnauthorized }}
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: {{ .Values.services.tlsRejectUnauthorized }}
{{ end }}
- name: APP_FEATURES
value: "automations"
{{- range .Values.services.automationWorkers.extraEnv }}
- name: {{ .name }}
value: {{ .value | quote }}
{{- end }}
image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }}
imagePullPolicy: Always
{{- if .Values.services.automationWorkers.startupProbe }}
{{- with .Values.services.automationWorkers.startupProbe }}
startupProbe:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- if .Values.services.automationWorkers.livenessProbe }}
{{- with .Values.services.automationWorkers.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- if .Values.services.automationWorkers.readinessProbe }}
{{- with .Values.services.automationWorkers.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
name: bbautomationworker
ports:
- containerPort: {{ .Values.services.automationWorkers.port }}
{{ with .Values.services.automationWorkers.resources }}
resources:
{{- toYaml . | nindent 10 }}
{{ end }}
{{ if .Values.services.automationWorkers.command }}
command:
{{- toYaml .Values.services.automationWorkers.command | nindent 10 }}
{{ end }}
{{ if .Values.services.automationWorkers.args }}
args:
{{- toYaml .Values.services.automationWorkers.args | nindent 10 }}
{{ end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{ if .Values.schedulerName }}
schedulerName: {{ .Values.schedulerName | quote }}
{{ end }}
{{ if .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.imagePullSecrets | nindent 6 }}
{{ end }}
restartPolicy: Always
serviceAccountName: ""
{{ if .Values.services.automationWorkers.ndots }}
dnsConfig:
options:
- name: ndots
value: {{ .Values.services.automationWorkers.ndots | quote }}
{{ end }}
status: {}
{{- end }}

View File

@ -0,0 +1,32 @@
{{- if .Values.services.automationWorkers.autoscaling.enabled }}
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "budibase.fullname" . }}-automation-workers
labels:
{{- include "budibase.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: automation-worker-service
minReplicas: {{ .Values.services.automationWorkers.autoscaling.minReplicas }}
maxReplicas: {{ .Values.services.automationWorkers.autoscaling.maxReplicas }}
metrics:
{{- if .Values.services.automationWorkers.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.services.automationWorkers.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.services.automationWorkers.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.services.automationWorkers.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -2,10 +2,6 @@
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
app.kubernetes.io/name: couchdb-backup app.kubernetes.io/name: couchdb-backup
name: couchdb-backup name: couchdb-backup
@ -18,10 +14,6 @@ spec:
type: Recreate type: Recreate
template: template:
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
app.kubernetes.io/name: couchdb-backup app.kubernetes.io/name: couchdb-backup
spec: spec:

View File

@ -1,28 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "budibase.fullname" . }}
labels:
{{- include "budibase.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "budibase.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -2,7 +2,6 @@
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
creationTimestamp: null
labels: labels:
io.kompose.service: minio-data io.kompose.service: minio-data
name: minio-data name: minio-data

View File

@ -2,10 +2,6 @@
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: minio-service io.kompose.service: minio-service
name: minio-service name: minio-service
@ -18,10 +14,6 @@ spec:
type: Recreate type: Recreate
template: template:
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: minio-service io.kompose.service: minio-service
spec: spec:
@ -46,11 +38,9 @@ spec:
image: minio/minio image: minio/minio
imagePullPolicy: "" imagePullPolicy: ""
livenessProbe: livenessProbe:
exec: httpGet:
command: path: /minio/health/live
- curl port: 9000
- -f
- http://localhost:9000/minio/health/live
failureThreshold: 3 failureThreshold: 3
periodSeconds: 30 periodSeconds: 30
timeoutSeconds: 20 timeoutSeconds: 20

View File

@ -2,10 +2,6 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: minio-service io.kompose.service: minio-service
name: minio-service name: minio-service

View File

@ -2,12 +2,9 @@ apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations: annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
{{ if .Values.services.proxy.deploymentAnnotations }} {{ if .Values.services.proxy.deploymentAnnotations }}
{{- toYaml .Values.services.proxy.deploymentAnnotations | indent 4 -}} {{- toYaml .Values.services.proxy.deploymentAnnotations | indent 4 -}}
{{ end }} {{ end }}
creationTimestamp: null
labels: labels:
app.kubernetes.io/name: budibase-proxy app.kubernetes.io/name: budibase-proxy
{{ if .Values.services.proxy.deploymentLabels }} {{ if .Values.services.proxy.deploymentLabels }}
@ -19,17 +16,15 @@ spec:
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: budibase-proxy app.kubernetes.io/name: budibase-proxy
minReadySeconds: 10
strategy: strategy:
type: RollingUpdate type: RollingUpdate
template: template:
metadata: metadata:
annotations: annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
{{ if .Values.services.proxy.templateAnnotations }} {{ if .Values.services.proxy.templateAnnotations }}
{{- toYaml .Values.services.proxy.templateAnnotations | indent 8 -}} {{- toYaml .Values.services.proxy.templateAnnotations | indent 8 -}}
{{ end }} {{ end }}
creationTimestamp: null
labels: labels:
app.kubernetes.io/name: budibase-proxy app.kubernetes.io/name: budibase-proxy
{{ if .Values.services.proxy.templateLabels }} {{ if .Values.services.proxy.templateLabels }}
@ -105,5 +100,19 @@ spec:
{{ end }} {{ end }}
restartPolicy: Always restartPolicy: Always
serviceAccountName: "" serviceAccountName: ""
{{ if .Values.services.proxy.command }}
command:
{{- toYaml .Values.services.proxy.command | nindent 8 }}
{{ end }}
{{ if .Values.services.proxy.args }}
args:
{{- toYaml .Values.services.proxy.args | nindent 8 }}
{{ end }}
volumes: volumes:
{{ if .Values.services.proxy.ndots }}
dnsConfig:
options:
- name: ndots
value: {{ .Values.services.proxy.ndots | quote }}
{{ end }}
status: {} status: {}

View File

@ -0,0 +1,32 @@
{{- if .Values.services.proxy.autoscaling.enabled }}
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "budibase.fullname" . }}-proxy
labels:
{{- include "budibase.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: proxy-service
minReplicas: {{ .Values.services.proxy.autoscaling.minReplicas }}
maxReplicas: {{ .Values.services.proxy.autoscaling.maxReplicas }}
metrics:
{{- if .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -1,10 +1,6 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
app.kubernetes.io/name: budibase-proxy app.kubernetes.io/name: budibase-proxy
name: proxy-service name: proxy-service

View File

@ -2,7 +2,6 @@
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
creationTimestamp: null
labels: labels:
io.kompose.service: redis-data io.kompose.service: redis-data
name: redis-data name: redis-data

View File

@ -2,10 +2,6 @@
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: redis-service io.kompose.service: redis-service
name: redis-service name: redis-service
@ -18,10 +14,6 @@ spec:
type: Recreate type: Recreate
template: template:
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: redis-service io.kompose.service: redis-service
spec: spec:

View File

@ -2,10 +2,6 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: redis-service io.kompose.service: redis-service
name: redis-service name: redis-service

View File

@ -2,12 +2,9 @@ apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations: annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
{{ if .Values.services.worker.deploymentAnnotations }} {{ if .Values.services.worker.deploymentAnnotations }}
{{- toYaml .Values.services.worker.deploymentAnnotations | indent 4 -}} {{- toYaml .Values.services.worker.deploymentAnnotations | indent 4 -}}
{{ end }} {{ end }}
creationTimestamp: null
labels: labels:
io.kompose.service: worker-service io.kompose.service: worker-service
{{ if .Values.services.worker.deploymentLabels }} {{ if .Values.services.worker.deploymentLabels }}
@ -24,12 +21,9 @@ spec:
template: template:
metadata: metadata:
annotations: annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
{{ if .Values.services.worker.templateAnnotations }} {{ if .Values.services.worker.templateAnnotations }}
{{- toYaml .Values.services.worker.templateAnnotations | indent 8 -}} {{- toYaml .Values.services.worker.templateAnnotations | indent 8 -}}
{{ end }} {{ end }}
creationTimestamp: null
labels: labels:
io.kompose.service: worker-service io.kompose.service: worker-service
{{ if .Values.services.worker.templateLabels }} {{ if .Values.services.worker.templateLabels }}
@ -188,6 +182,10 @@ spec:
- name: NODE_TLS_REJECT_UNAUTHORIZED - name: NODE_TLS_REJECT_UNAUTHORIZED
value: {{ .Values.services.tlsRejectUnauthorized }} value: {{ .Values.services.tlsRejectUnauthorized }}
{{ end }} {{ end }}
{{- range .Values.services.worker.extraEnv }}
- name: {{ .name }}
value: {{ .value | quote }}
{{- end }}
image: budibase/worker:{{ .Values.globals.appVersion | default .Chart.AppVersion }} image: budibase/worker:{{ .Values.globals.appVersion | default .Chart.AppVersion }}
imagePullPolicy: Always imagePullPolicy: Always
{{- if .Values.services.worker.startupProbe }} {{- if .Values.services.worker.startupProbe }}
@ -215,6 +213,14 @@ spec:
resources: resources:
{{- toYaml . | nindent 10 }} {{- toYaml . | nindent 10 }}
{{ end }} {{ end }}
{{ if .Values.services.worker.command }}
command:
{{- toYaml .Values.services.worker.command | nindent 10 }}
{{ end }}
{{ if .Values.services.worker.args }}
args:
{{- toYaml .Values.services.worker.args | nindent 10 }}
{{ end }}
{{- with .Values.affinity }} {{- with .Values.affinity }}
affinity: affinity:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
@ -232,4 +238,10 @@ spec:
{{ end }} {{ end }}
restartPolicy: Always restartPolicy: Always
serviceAccountName: "" serviceAccountName: ""
{{ if .Values.services.worker.ndots }}
dnsConfig:
options:
- name: ndots
value: {{ .Values.services.worker.ndots | quote }}
{{ end }}
status: {} status: {}

View File

@ -0,0 +1,32 @@
{{- if .Values.services.worker.autoscaling.enabled }}
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "budibase.fullname" . }}-worker
labels:
{{- include "budibase.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: worker-service
minReplicas: {{ .Values.services.worker.autoscaling.minReplicas }}
maxReplicas: {{ .Values.services.worker.autoscaling.maxReplicas }}
metrics:
{{- if .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@ -1,10 +1,6 @@
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 (992df58d8)
creationTimestamp: null
labels: labels:
io.kompose.service: worker-service io.kompose.service: worker-service
name: worker-service name: worker-service

View File

@ -1,56 +1,32 @@
# Default values for budibase. # -- Passed to all pods created by this chart. Should not ordinarily need to be changed.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: [] imagePullSecrets: []
# -- Override the name of the deployment. Defaults to {{ .Chart.Name }}.
nameOverride: "" nameOverride: ""
# fullnameOverride: ""
serviceAccount: serviceAccount:
# Specifies whether a service account should be created # -- Specifies whether a service account should be created
create: true create: true
# Annotations to add to the service account # -- Annotations to add to the service account
annotations: {} annotations: {}
# The name of the service account to use. # -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template # If not set and create is true, a name is generated using the fullname template
name: "" name: ""
podAnnotations: {}
podSecurityContext:
{}
# fsGroup: 2000
securityContext:
{}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service: service:
# -- Service type for the service that points to the main Budibase proxy pod.
type: ClusterIP type: ClusterIP
# -- Port to expose on the service.
port: 10000 port: 10000
ingress: ingress:
# -- Whether to create an Ingress resource pointing to the Budibase proxy.
enabled: true enabled: true
aws: false # -- What ingress class to use.
nginx: true
certificateArn: ""
className: "" className: ""
annotations: # -- Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy.
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/client-max-body-size: 150M
nginx.ingress.kubernetes.io/proxy-body-size: 50m
hosts: hosts:
- host: # change if using custom domain # @ignore
- host:
paths: paths:
- path: / - path: /
pathType: Prefix pathType: Prefix
@ -60,361 +36,504 @@ ingress:
port: port:
number: 10000 number: 10000
autoscaling: awsAlbIngress:
# -- Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller.
enabled: false enabled: false
minReplicas: 1 # -- If you're wanting to use HTTPS, you'll need to create an ACM certificate and specify the ARN here.
maxReplicas: 100 certificateArn: ""
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
# -- Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed.
# See <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/> for more information
# on tolerations.
tolerations: [] tolerations: []
# -- Sets the affinity for all pods created by this chart. Should not ordinarily
# need to be changed. See
# <https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/>
# for more information on affinity.
affinity: {} affinity: {}
globals: globals:
appVersion: "" # Use as an override to .Chart.AppVersion # -- The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}.
# Ends up being used as the image version tag for the apps, proxy, and worker images.
appVersion: ""
# -- Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not
# ordinarily need to be changed.
budibaseEnv: PRODUCTION budibaseEnv: PRODUCTION
# -- Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be
# changed.
tenantFeatureFlags: "*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR" tenantFeatureFlags: "*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"
# -- Whether to enable analytics or not. You can read more about our analytics here:
# <https://docs.budibase.com/docs/analytics>.
enableAnalytics: "1" enableAnalytics: "1"
# @ignore (only used if enableAnalytics is set to 1)
posthogToken: "phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU" posthogToken: "phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU"
selfHosted: "1" # set to 0 for budibase cloud environment, set to 1 for self-hosted setup # @ignore (should not normally need to be changed, we only set this to "0"
multiTenancy: "0" # set to 0 to disable multiple orgs, set to 1 to enable multiple orgs # when deploying to our Cloud environment)
offlineMode: "0" # set to 1 to enable offline mode selfHosted: "1"
# @ignore (doesn't work out of the box for self-hosted users, only meant for Budicloud)
multiTenancy: "0"
# @ignore (only currently used to determine whether to fetch licenses offline or not, should
# not normally need to be changed, and only applies to Enterprise customers)
offlineMode: "0"
# @ignore (only needs to be set in our cloud environment)
accountPortalUrl: "" accountPortalUrl: ""
# @ignore (only needs to be set in our cloud environment)
accountPortalApiKey: "" accountPortalApiKey: ""
# -- Sets the domain attribute of the cookie that Budibase uses to store session information.
# See <https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies#define_where_cookies_are_sent>
# for details on why you might want to set this.
cookieDomain: "" cookieDomain: ""
# -- Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are
# self-hosting.
platformUrl: "" platformUrl: ""
# -- Whether or not to enable doing data migrations over the HTTP API. If this is set to "0",
# migrations are run on startup. You shouldn't ordinarily need to change this.
httpMigrations: "0" httpMigrations: "0"
# -- Google OAuth settings. These can also be set in the Budibase UI, see
# <https://docs.budibase.com/docs/sso-with-google> for details.
google: google:
# -- Client ID of your Google OAuth app.
clientId: "" clientId: ""
# -- Client secret of your Google OAuth app.
secret: "" secret: ""
# -- The maximum number of iterations allowed for an automation loop step. You can read more about
# looping here: <https://docs.budibase.com/docs/looping>.
automationMaxIterations: "200" automationMaxIterations: "200"
createSecrets: true # creates an internal API key, JWT secrets and redis password for you # -- Create an internal API key, JWT secret, object store access key and
# secret, and store them in a Kubernetes `Secret`.
createSecrets: true
# if createSecrets is set to false, you can hard-code your secrets here # -- Used for encrypting API keys and environment variables when stored in the database.
# You don't need to set this if `createSecrets` is true.
apiEncryptionKey: "" apiEncryptionKey: ""
# -- API key used for internal Budibase API calls. You don't need to set this
# if `createSecrets` is true.
internalApiKey: "" internalApiKey: ""
# -- Secret used for signing JWTs. You don't need to set this if `createSecrets` is true.
jwtSecret: "" jwtSecret: ""
cdnUrl: ""
# fallback values used during live rotation # -- A fallback value for `internalApiKey`. If you're rotating your encryption key, you can
# set this to the old value for the duration of the rotation.
internalApiKeyFallback: "" internalApiKeyFallback: ""
# -- A fallback value for `jwtSecret`. If you're rotating your JWT secret, you can set this
# to the old value for the duration of the rotation.
jwtSecretFallback: "" jwtSecretFallback: ""
smtp: smtp:
# -- Whether to enable SMTP or not.
enabled: false enabled: false
# -- The hostname of your SMTP server.
# globalAgentHttpProxy: host: ""
# globalAgentHttpsProxy: # -- The port of your SMTP server.
# globalAgentNoProxy: port: "587"
# -- The email address to use in the "From:" field of emails sent by Budibase.
from: ""
# -- The username to use when authenticating with your SMTP server.
user: ""
# -- The password to use when authenticating with your SMTP server.
password: ""
services: services:
budibaseVersion: latest # -- The DNS suffix to use for service discovery. You only need to change this
# if you've configured your cluster to use a different DNS suffix.
dns: cluster.local dns: cluster.local
# tlsRejectUnauthorized: 0
proxy: proxy:
# @ignore (you shouldn't need to change this)
port: 10000 port: 10000
# -- The number of proxy replicas to run.
replicaCount: 1 replicaCount: 1
# @ignore (you should never need to change this)
upstreams: upstreams:
apps: "http://app-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.apps.port }}" apps: "http://app-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.apps.port }}"
worker: "http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}" worker: "http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}"
minio: "http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}" minio: "http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}"
couchdb: "http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}" couchdb: "http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}"
# -- The resources to use for proxy pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {} resources: {}
# -- Startup probe configuration for proxy pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
startupProbe: startupProbe:
# @ignore
httpGet: httpGet:
path: /health path: /health
port: 10000 port: 10000
scheme: HTTP scheme: HTTP
# @ignore
failureThreshold: 30 failureThreshold: 30
# @ignore
periodSeconds: 3 periodSeconds: 3
# -- Readiness probe configuration for proxy pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
readinessProbe: readinessProbe:
# @ignore
httpGet: httpGet:
path: /health path: /health
port: 10000 port: 10000
scheme: HTTP scheme: HTTP
# @ignore
periodSeconds: 3 periodSeconds: 3
# @ignore
failureThreshold: 1 failureThreshold: 1
# -- Liveness probe configuration for proxy pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
livenessProbe: livenessProbe:
# @ignore
httpGet: httpGet:
path: /health path: /health
port: 10000 port: 10000
scheme: HTTP scheme: HTTP
# @ignore
failureThreshold: 3 failureThreshold: 3
# @ignore
periodSeconds: 5 periodSeconds: 5
# annotations: autoscaling:
# co.elastic.logs/module: nginx # -- Whether to enable horizontal pod autoscaling for the proxy service.
# co.elastic.logs/fileset.stdout: access enabled: false
# co.elastic.logs/fileset.stderr: error minReplicas: 1
maxReplicas: 10
# -- Target CPU utilization percentage for the proxy service. Note that
# for autoscaling to work, you will need to have metrics-server
# configured, and resources set for the proxy pods.
targetCPUUtilizationPercentage: 80
apps: apps:
# @ignore (you shouldn't need to change this)
port: 4002 port: 4002
# -- The number of apps replicas to run.
replicaCount: 1 replicaCount: 1
# -- The log level for the apps service.
logLevel: info logLevel: info
# -- Whether or not to log HTTP requests to the apps service.
httpLogging: 1 httpLogging: 1
# -- The resources to use for apps pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {} resources: {}
# -- Extra environment variables to set for apps pods. Takes a list of
# name=value pairs.
extraEnv: []
# -- Startup probe configuration for apps pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
startupProbe:
# @ignore
httpGet:
path: /health
port: 4002
scheme: HTTP
# @ignore
failureThreshold: 30
# @ignore
periodSeconds: 3
# -- Readiness probe configuration for apps pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
readinessProbe:
# @ignore
httpGet:
path: /health
port: 4002
scheme: HTTP
# @ignore
periodSeconds: 3
# @ignore
failureThreshold: 1
# -- Liveness probe configuration for apps pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
livenessProbe:
# @ignore
httpGet:
path: /health
port: 4002
scheme: HTTP
# @ignore
failureThreshold: 3
# @ignore
periodSeconds: 5
autoscaling:
# -- Whether to enable horizontal pod autoscaling for the apps service.
enabled: false
minReplicas: 1
maxReplicas: 10
# -- Target CPU utilization percentage for the apps service. Note that for
# autoscaling to work, you will need to have metrics-server configured,
# and resources set for the apps pods.
targetCPUUtilizationPercentage: 80
automationWorkers:
# -- Whether or not to enable the automation worker service. If you disable this,
# automations will be processed by the apps service.
enabled: true
# @ignore (you shouldn't need to change this)
port: 4002
# -- The number of automation worker replicas to run.
replicaCount: 1
# -- The log level for the automation worker service.
logLevel: info
# -- The resources to use for automation worker pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {}
# -- Extra environment variables to set for automation worker pods. Takes a list of
# name=value pairs.
extraEnv: []
# -- Startup probe configuration for automation worker pods. You shouldn't
# need to change this, but if you want to you can find more information
# here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
startupProbe:
# @ignore
httpGet:
path: /health
port: 4002
scheme: HTTP
# @ignore
failureThreshold: 30
# @ignore
periodSeconds: 3
# -- Readiness probe configuration for automation worker pods. You shouldn't
# need to change this, but if you want to you can find more information
# here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
readinessProbe:
# @ignore
httpGet:
path: /health
port: 4002
scheme: HTTP
# @ignore
periodSeconds: 3
# @ignore
failureThreshold: 1
# -- Liveness probe configuration for automation worker pods. You shouldn't
# need to change this, but if you want to you can find more information
# here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
livenessProbe:
# @ignore
httpGet:
path: /health
port: 4002
scheme: HTTP
# @ignore
failureThreshold: 3
# @ignore
periodSeconds: 30
autoscaling:
# -- Whether to enable horizontal pod autoscaling for the apps service.
enabled: false
minReplicas: 1
maxReplicas: 10
# -- Target CPU utilization percentage for the automation worker service.
# Note that for autoscaling to work, you will need to have metrics-server
# configured, and resources set for the automation worker pods.
targetCPUUtilizationPercentage: 80
worker:
# @ignore (you shouldn't need to change this)
port: 4003
# -- The number of worker replicas to run.
replicaCount: 1
# -- The log level for the worker service.
logLevel: info
# -- Whether or not to log HTTP requests to the worker service.
httpLogging: 1
# -- The resources to use for worker pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {}
# -- Extra environment variables to set for worker pods. Takes a list of
# name=value pairs.
extraEnv: []
# -- Startup probe configuration for worker pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
startupProbe:
# @ignore
httpGet:
path: /health
port: 4003
scheme: HTTP
# @ignore
failureThreshold: 30
# @ignore
periodSeconds: 3
# -- Readiness probe configuration for worker pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
readinessProbe:
# @ignore
httpGet:
path: /health
port: 4003
scheme: HTTP
# @ignore
periodSeconds: 3
# @ignore
failureThreshold: 1
# -- Liveness probe configuration for worker pods. You shouldn't need to
# change this, but if you want to you can find more information here:
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
# @default -- HTTP health checks.
livenessProbe:
# @ignore
httpGet:
path: /health
port: 4003
scheme: HTTP
# @ignore
failureThreshold: 3
# @ignore
periodSeconds: 5
autoscaling:
# -- Whether to enable horizontal pod autoscaling for the worker service.
enabled: false
minReplicas: 1
maxReplicas: 10
# -- Target CPU utilization percentage for the worker service. Note that
# for autoscaling to work, you will need to have metrics-server
# configured, and resources set for the worker pods.
targetCPUUtilizationPercentage: 80
couchdb:
# -- Whether or not to spin up a CouchDB instance in your cluster. True by
# default, and the configuration for the CouchDB instance is under the
# `couchdb` key at the root of this file. You can see what options are
# available to you by looking at the official CouchDB Helm chart:
# <https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>.
enabled: true
# url: "" # only change if pointing to existing couch server
# user: "" # only change if pointing to existing couch server
# password: "" # only change if pointing to existing couch server
port: 5984
backup:
# -- Whether or not to enable periodic CouchDB backups. This works by replicating
# to another CouchDB instance.
enabled: false
# -- Target couchDB instance to back up to, either a hostname or an IP address.
target: ""
# -- Backup interval in seconds
interval: ""
# -- The resources to use for CouchDB backup pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {}
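A hedged sketch of enabling the replication-based backup via a values override; the target URL is a placeholder for a CouchDB instance you control:

services:
  couchdb:
    backup:
      enabled: true
      # placeholder URL, replace with your backup CouchDB instance
      target: "http://user:password@backup-couchdb.example.com:5984"
      # replicate roughly once an hour
      interval: "3600"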
redis:
# -- Whether or not to deploy a Redis pod into your cluster.
enabled: true
# -- Port to expose Redis on.
port: 6379
# @ignore (you should leave this as 1, we don't support clustering Redis)
replicaCount: 1
# -- If you choose to run Redis externally to this chart, you can specify the
# connection details here.
url: ""
# -- The password to use when connecting to Redis. It's recommended that you change
# this from the default if you're running Redis in-cluster.
password: "budibase"
# -- How much persistent storage to allocate to Redis.
storage: 100Mi
# -- If defined, storageClassName: <storageClass> If set to "-",
# storageClassName: "", which disables dynamic provisioning If undefined
# (the default) or set to null, no storageClassName spec is set, choosing
# the default provisioner.
storageClass: ""
# -- The resources to use for Redis pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {}
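As a sketch, pointing Budibase at an external Redis instead of the in-cluster one might look like the following override (hostname and password are placeholders):

services:
  redis:
    enabled: false
    url: "redis.internal.example.com:6379"   # placeholder host:port
    password: "change-me"                    # placeholder password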
objectStore:
# -- Set to false if using another object store, such as S3. You will need
# to set `services.objectStore.url` to point to your bucket if you do this.
minio: true
# -- Whether to enable the Minio web console or not. If you're exposing
# Minio to the Internet (via a custom Ingress record, for example), you
# should set this to false. If you're only exposing Minio to your cluster,
# you can leave this as true.
browser: true
# @ignore
port: 9000
# @ignore (you should leave this as 1, we don't support clustering Minio)
replicaCount: 1
# -- AWS_ACCESS_KEY if using S3
accessKey: ""
# -- AWS_SECRET_ACCESS_KEY if using S3
secretKey: ""
# -- AWS_REGION if using S3
region: ""
# -- URL to use for object storage. Only change this if you're using an
# external object store, such as S3. Remember to set `minio: false` if you
# do this.
url: "http://minio-service:9000"
# -- How much storage to give Minio in its PersistentVolumeClaim.
storage: 100Mi
# -- If defined, storageClassName: <storageClass> If set to "-",
# storageClassName: "", which disables dynamic provisioning If undefined
# (the default) or set to null, no storageClassName spec is set, choosing
# the default provisioner.
storageClass: ""
# -- The resources to use for Minio pods. See
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
# for more information on how to set these.
resources: {}
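A minimal sketch of switching to an external S3-compatible store, based on the keys described above; the endpoint, region and credentials are placeholders:

services:
  objectStore:
    minio: false
    url: "https://s3.eu-west-1.amazonaws.com"   # placeholder endpoint
    region: "eu-west-1"
    accessKey: "<AWS_ACCESS_KEY>"
    secretKey: "<AWS_SECRET_ACCESS_KEY>"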
cloudfront:
# -- Set the url of a distribution to enable cloudfront.
cdn: ""
# -- ID of public key stored in cloudfront.
publicKeyId: ""
# -- Base64 encoded private key for the above public key.
privateKey64: ""
# Override values in couchDB subchart. We're only specifying the values we're changing.
# If you want to see all of the available values, see:
# https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb
couchdb:
# -- The number of replicas to run in the CouchDB cluster. We set this to
# 1 by default to make things simpler, but you can set it to 3 if you need
# a high-availability CouchDB cluster.
clusterSize: 1
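For example, a three-node CouchDB cluster could be requested with an override like the one below (illustrative only; the persistence options come from the upstream CouchDB chart linked above):

couchdb:
  clusterSize: 3
  persistentVolume:
    enabled: true
    size: 10Gi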
allowAdminParty: false
# -- We use a custom CouchDB image for running Budibase and we don't support
# using any other CouchDB image. You shouldn't change this, and if you do we
# can't guarantee that Budibase will work.
# adminUsername: budibase
# adminPassword: budibase
# adminHash: -pbkdf2-this_is_not_necessarily_secure_either
# cookieAuthSecret: admin
## When enabled, will deploy a networkpolicy that allows CouchDB pods to
## communicate with each other for clustering and ingress on port 5984
networkPolicy:
enabled: true
# Use a service account
serviceAccount:
enabled: true
create: true
# name:
# imagePullSecrets:
# - name: myimagepullsecret
## The storage volume used by each Pod in the StatefulSet. If a
## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral
## local storage. Setting the storageClass attribute to "-" disables dynamic
## provisioning of Persistent Volumes; leaving it unset will invoke the default
## provisioner.
persistentVolume:
enabled: false
accessModes:
- ReadWriteOnce
size: 10Gi
storageClass: ""
## The CouchDB image
image:
# @ignore
repository: budibase/couchdb
# @ignore
tag: v3.2.1
# @ignore
enableSearch: true
searchImage:
repository: kocolosk/couchdb-search
tag: 0.2.0
pullPolicy: IfNotPresent
initImage:
repository: busybox
tag: latest
pullPolicy: Always
# @ignore
# This should remain false. We ship Clouseau ourselves as part of the
# budibase/couchdb image, and it's not possible to disable it because it's a
# core part of the Budibase experience.
enableSearch: false
## Optional pod annotations
annotations: {}
## Optional tolerations
tolerations: []
affinity: {}
service:
# annotations:
enabled: true
type: ClusterIP
externalPort: 5984
## An Ingress resource can provide name-based virtual hosting and TLS
## termination among other things for CouchDB deployments which are accessed
## from outside the Kubernetes cluster.
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
hosts:
- chart-example.local
path: /
annotations:
[]
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Optional resource requests and limits for the CouchDB container
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
{}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# cpu: 56
# memory: 256Gi
## erlangFlags is a map that is passed to the Erlang VM as flags using the
## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to
## establish connectivity between cluster nodes.
## ref: http://erlang.org/doc/man/erl.html#init_flags
erlangFlags:
name: couchdb
setcookie: monster
## couchdbConfig will override default CouchDB configuration settings.
## The contents of this map are reformatted into a .ini file laid down
## by a ConfigMap object.
## ref: http://docs.couchdb.org/en/latest/config/index.html
couchdbConfig:
couchdb:
# -- Unique identifier for this CouchDB server instance. You shouldn't need
# to change this.
uuid: budibase-couchdb
chttpd:
bind_address: any
# chttpd.require_valid_user disables all the anonymous requests to the port
# 5984 when is set to true.
require_valid_user: false
# Kubernetes local cluster domain.
# This is used to generate FQDNs for peers when joining the CouchDB cluster.
dns:
clusterDomainSuffix: cluster.local
## Configure liveness and readiness probe values
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
# FOR COUCHDB
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1

View File

@ -84,13 +84,13 @@ Component libraries are collections of components as well as the definition of t
- If the project diverges from your branch, please rebase instead of merging. This makes the commit graph easier to read. - Once your work is completed, please raise a PR against the `master` branch with some information about what has changed and why.
### Getting Started For Contributors
#### 1. Prerequisites
- NodeJS version `20.x.x`
- Python version `3.x`
### Using asdf (recommended)
@ -246,7 +246,7 @@ From here - to develop a change in pro, you can follow the below flow:
cd packages/pro
# get the base branch you are working from (same as monorepo)
git fetch
git checkout master
# create a branch, named the same as the branch in your monorepo
git checkout -b <some branch>
... make changes

View File

@ -0,0 +1,21 @@
module.exports = {
"no-budibase-imports": {
create: function (context) {
return {
ImportDeclaration(node) {
const importPath = node.source.value
if (
/^@budibase\/[^/]+\/.*$/.test(importPath) &&
importPath !== "@budibase/backend-core/tests"
) {
context.report({
node,
message: `Importing from @budibase is not allowed, except for @budibase/backend-core/tests.`,
})
}
},
}
},
},
}

View File

@ -22,6 +22,6 @@
"@types/react": "17.0.39", "@types/react": "17.0.39",
"eslint": "8.10.0", "eslint": "8.10.0",
"eslint-config-next": "12.1.0", "eslint-config-next": "12.1.0",
"typescript": "4.6.2" "typescript": "5.2.2"
} }
} }

View File

@ -29,7 +29,6 @@ WORKDIR /opt/couchdb
ADD couch/vm.args couch/local.ini ./etc/
WORKDIR /
ADD build-target-paths.sh .
ADD runner.sh ./bbcouch-runner.sh
RUN chmod +x ./bbcouch-runner.sh /opt/clouseau/bin/clouseau
CMD ["./bbcouch-runner.sh"]

View File

@ -1,24 +0,0 @@
#!/bin/bash
echo ${TARGETBUILD} > /buildtarget.txt
if [[ "${TARGETBUILD}" = "aas" ]]; then
# Azure AppService uses /home for persisent data & SSH on port 2222
DATA_DIR=/home
WEBSITES_ENABLE_APP_SERVICE_STORAGE=true
mkdir -p $DATA_DIR/{search,minio,couch}
mkdir -p $DATA_DIR/couch/{dbs,views}
chown -R couchdb:couchdb $DATA_DIR/couch/
apt update
apt-get install -y openssh-server
echo "root:Docker!" | chpasswd
mkdir -p /tmp
chmod +x /tmp/ssh_setup.sh \
&& (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null)
cp /etc/sshd_config /etc/ssh/sshd_config
/etc/init.d/ssh restart
sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini
else
sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini
fi

View File

@ -1,14 +1,81 @@
#!/bin/bash
DATA_DIR=${DATA_DIR:-/data}
mkdir -p ${DATA_DIR}
mkdir -p ${DATA_DIR}/couch/{dbs,views}
mkdir -p ${DATA_DIR}/search
chown -R couchdb:couchdb ${DATA_DIR}/couch
/build-target-paths.sh
echo ${TARGETBUILD} > /buildtarget.txt
if [[ "${TARGETBUILD}" = "aas" ]]; then
# Azure AppService uses /home for persistent data & SSH on port 2222
DATA_DIR="${DATA_DIR:-/home}"
WEBSITES_ENABLE_APP_SERVICE_STORAGE=true
mkdir -p $DATA_DIR/{search,minio,couch}
mkdir -p $DATA_DIR/couch/{dbs,views}
chown -R couchdb:couchdb $DATA_DIR/couch/
apt update
apt-get install -y openssh-server
echo "root:Docker!" | chpasswd
mkdir -p /tmp
chmod +x /tmp/ssh_setup.sh \
&& (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null)
cp /etc/sshd_config /etc/ssh/sshd_config
/etc/init.d/ssh restart
sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini
elif [[ "${TARGETBUILD}" = "single" ]]; then
# In the single image build, the Dockerfile specifies /data as a volume
# mount, so we use that for all persistent data.
sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini
elif [[ "${TARGETBUILD}" = "docker-compose" ]]; then
# We remove the database_dir and view_index_dir settings from the local.ini
# in docker-compose because it will default to /opt/couchdb/data which is what
# our docker-compose was using prior to us switching to using our own CouchDB
# image.
sed -i "s#^database_dir.*\$##g" /opt/couchdb/etc/local.ini
sed -i "s#^view_index_dir.*\$##g" /opt/couchdb/etc/local.ini
sed -i "s#^dir=.*\$#dir=/opt/couchdb/data#g" /opt/clouseau/clouseau.ini
elif [[ -n $KUBERNETES_SERVICE_HOST ]]; then
# In Kubernetes the directory /opt/couchdb/data has a persistent volume
# mount for storing database data.
sed -i "s#^dir=.*\$#dir=/opt/couchdb/data#g" /opt/clouseau/clouseau.ini
# We remove the database_dir and view_index_dir settings from the local.ini
# in Kubernetes because it will default to /opt/couchdb/data which is what
# our Helm chart was using prior to us switching to using our own CouchDB
# image.
sed -i "s#^database_dir.*\$##g" /opt/couchdb/etc/local.ini
sed -i "s#^view_index_dir.*\$##g" /opt/couchdb/etc/local.ini
# We remove the -name setting from the vm.args file in Kubernetes because
# it will default to the pod FQDN, which is what's required for clustering
# to work.
sed -i "s/^-name .*$//g" /opt/couchdb/etc/vm.args
else
# For all other builds, we use /data for persistent data.
sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini
fi
# Start Clouseau. Budibase won't function correctly without Clouseau running, it
# powers the search API endpoints which are used to do all sorts, including
# populating app grids.
/opt/clouseau/bin/clouseau > /dev/stdout 2>&1 &
# Start CouchDB.
/docker-entrypoint.sh /opt/couchdb/bin/couchdb &
sleep 10
# Wait for CouchDB to start up.
while [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/_up -o /dev/null) -ne 200 ]]; do
echo 'Waiting for CouchDB to start...';
sleep 5;
done
# CouchDB needs the `_users` and `_replicator` databases to exist before it will
# function correctly, so we create them here.
curl -X PUT -u "${COUCHDB_USER}:${COUCHDB_PASSWORD}" http://localhost:5984/_users
curl -X PUT -u "${COUCHDB_USER}:${COUCHDB_PASSWORD}" http://localhost:5984/_replicator
sleep infinity

View File

@ -6,7 +6,7 @@ services:
app-service:
build:
context: ..
dockerfile: packages/server/Dockerfile
args:
- BUDIBASE_VERSION=0.0.0+dev-docker
container_name: build-bbapps
@ -36,7 +36,7 @@ services:
worker-service:
build:
context: ..
dockerfile: packages/worker/Dockerfile
args:
- BUDIBASE_VERSION=0.0.0+dev-docker
container_name: build-bbworker

View File

@ -26,7 +26,7 @@ services:
BB_ADMIN_USER_EMAIL: ${BB_ADMIN_USER_EMAIL}
BB_ADMIN_USER_PASSWORD: ${BB_ADMIN_USER_PASSWORD}
PLUGINS_DIR: ${PLUGINS_DIR}
OFFLINE_MODE: ${OFFLINE_MODE:-}
depends_on:
- worker-service
- redis-service
@ -53,11 +53,10 @@ services:
INTERNAL_API_KEY: ${INTERNAL_API_KEY}
REDIS_URL: redis-service:6379
REDIS_PASSWORD: ${REDIS_PASSWORD}
OFFLINE_MODE: ${OFFLINE_MODE:-}
depends_on:
- redis-service
- minio-service
- couch-init
minio-service:
restart: unless-stopped
@ -70,7 +69,7 @@ services:
MINIO_BROWSER: "off"
command: server /data --console-address ":9001"
healthcheck:
test: "timeout 5s bash -c ':> /dev/tcp/127.0.0.1/9000' || exit 1"
interval: 30s
timeout: 20s
retries: 3
@ -98,30 +97,19 @@ services:
couchdb-service:
restart: unless-stopped
image: budibase/couchdb
pull_policy: always
environment:
- COUCHDB_PASSWORD=${COUCH_DB_PASSWORD}
- COUCHDB_USER=${COUCH_DB_USER}
- TARGETBUILD=docker-compose
volumes:
- couchdb3_data:/opt/couchdb/data
couch-init:
image: curlimages/curl
environment:
PUT_CALL: "curl -u ${COUCH_DB_USER}:${COUCH_DB_PASSWORD} -X PUT couchdb-service:5984"
depends_on:
- couchdb-service
command:
[
"sh",
"-c",
"sleep 10 && $${PUT_CALL}/_users && $${PUT_CALL}/_replicator; fg;",
]
redis-service:
restart: unless-stopped
image: redis
command: redis-server --requirepass "${REDIS_PASSWORD}"
volumes:
- redis_data:/data

View File

@ -42,7 +42,7 @@ http {
server {
listen 10000 default_server;
server_name _;
client_max_body_size 50000m;
ignore_invalid_headers off;
proxy_buffering off;

View File

@ -249,4 +249,31 @@ http {
gzip_comp_level 6;
gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml;
}
# From https://docs.datadoghq.com/integrations/nginx/?tab=kubernetes
server {
listen 81;
server_name localhost;
access_log off;
allow 127.0.0.1;
allow 10.0.0.0/8;
deny all;
location /nginx_status {
# Choose your status module
# freely available with open source NGINX
stub_status;
# for open source NGINX < version 1.7.5
# stub_status on;
# available only with NGINX Plus
# status;
# ensures the version information can be retrieved
server_tokens on;
}
}
}

View File

@ -1,24 +0,0 @@
#!/bin/bash
echo ${TARGETBUILD} > /buildtarget.txt
if [[ "${TARGETBUILD}" = "aas" ]]; then
# Azure AppService uses /home for persisent data & SSH on port 2222
DATA_DIR=/home
WEBSITES_ENABLE_APP_SERVICE_STORAGE=true
mkdir -p $DATA_DIR/{search,minio,couch}
mkdir -p $DATA_DIR/couch/{dbs,views}
chown -R couchdb:couchdb $DATA_DIR/couch/
apt update
apt-get install -y openssh-server
echo "root:Docker!" | chpasswd
mkdir -p /tmp
chmod +x /tmp/ssh_setup.sh \
&& (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null)
cp /etc/sshd_config /etc/ssh/sshd_config
/etc/init.d/ssh restart
sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini
else
sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini
sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini
fi

View File

@ -1,44 +1,59 @@
FROM node:20-slim as build
# install node-gyp dependencies
RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 jq
# add pin script
WORKDIR /
ADD scripts/cleanup.sh ./
RUN chmod +x /cleanup.sh
# copy and install dependencies
WORKDIR /app
COPY package.json .
COPY yarn.lock .
COPY lerna.json .
COPY .yarnrc .
COPY packages/server/package.json packages/server/package.json
COPY packages/worker/package.json packages/worker/package.json
# string-templates does not get bundled during the esbuild process, so we want to use the local version
COPY packages/string-templates/package.json packages/string-templates/package.json
RUN yarn install --production=true --network-timeout 1000000
RUN /cleanup.sh
FROM budibase/couchdb
COPY scripts/removeWorkspaceDependencies.sh scripts/removeWorkspaceDependencies.sh
RUN chmod +x ./scripts/removeWorkspaceDependencies.sh
RUN ./scripts/removeWorkspaceDependencies.sh packages/server/package.json
RUN ./scripts/removeWorkspaceDependencies.sh packages/worker/package.json
# We will never want to sync pro, but the script is still required
RUN echo '' > scripts/syncProPackage.js
RUN jq 'del(.scripts.postinstall)' package.json > temp.json && mv temp.json package.json
RUN ./scripts/removeWorkspaceDependencies.sh package.json
RUN --mount=type=cache,target=/root/.yarn YARN_CACHE_FOLDER=/root/.yarn yarn install --production
# copy the actual code
COPY packages/server/dist packages/server/dist
COPY packages/server/pm2.config.js packages/server/pm2.config.js
COPY packages/server/client packages/server/client
COPY packages/server/builder packages/server/builder
COPY packages/worker/dist packages/worker/dist
COPY packages/worker/pm2.config.js packages/worker/pm2.config.js
COPY packages/string-templates packages/string-templates
FROM budibase/couchdb as runner
ARG TARGETARCH
ENV TARGETARCH $TARGETARCH
ENV NODE_MAJOR 20
#TARGETBUILD can be set to single (for single docker image) or aas (for azure app service)
# e.g. docker build --build-arg TARGETBUILD=aas ....
ARG TARGETBUILD=single
ENV TARGETBUILD $TARGETBUILD
COPY --from=build /app /app
COPY --from=build /worker /worker
# install base dependencies
RUN apt-get update && \
apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server libaio1
# Install postgres client for pg_dump utils
RUN apt install -y software-properties-common apt-transport-https ca-certificates gnupg \
&& curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \
&& echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \
&& apt update -y \
@ -47,14 +62,12 @@ RUN apt install software-properties-common apt-transport-https gpg -y \
# install other dependencies, nodejs, oracle requirements, jdk8, redis, nginx
WORKDIR /nodejs
COPY scripts/install-node.sh ./install.sh
RUN chmod +x install.sh && ./install.sh
apt-get install -y --no-install-recommends libaio1 nodejs && \
npm install --global yarn pm2
# setup nginx
COPY hosting/single/nginx/nginx.conf /etc/nginx
COPY hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default
RUN mkdir -p /var/log/nginx && \
touch /var/log/nginx/error.log && \
touch /var/run/nginx.pid && \
@ -62,29 +75,39 @@ RUN mkdir -p /var/log/nginx && \
WORKDIR /
RUN mkdir -p scripts/integrations/oracle
COPY packages/server/scripts/integrations/oracle scripts/integrations/oracle
RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh
# setup minio
WORKDIR /minio
COPY scripts/install-minio.sh ./install.sh
RUN chmod +x install.sh && ./install.sh
# setup runner file
WORKDIR /
COPY hosting/single/runner.sh .
RUN chmod +x ./runner.sh
COPY hosting/single/healthcheck.sh .
RUN chmod +x ./healthcheck.sh
# Script below sets the path for storing data based on $DATA_DIR
# For Azure App Service install SSH & point data locations to /home
COPY hosting/single/ssh/sshd_config /etc/
COPY hosting/single/ssh/ssh_setup.sh /tmp
RUN /build-target-paths.sh
# setup letsencrypt certificate
RUN apt-get install -y certbot python3-certbot-nginx
COPY hosting/letsencrypt /app/letsencrypt
RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh
COPY --from=build /app/node_modules /node_modules
COPY --from=build /app/package.json /package.json
COPY --from=build /app/packages/server /app
COPY --from=build /app/packages/worker /worker
COPY --from=build /app/packages/string-templates /string-templates
RUN cd /string-templates && yarn link && cd ../app && yarn link @budibase/string-templates && cd ../worker && yarn link @budibase/string-templates
# cleanup cache
RUN yarn cache clean -f
EXPOSE 80
EXPOSE 443
@ -92,20 +115,10 @@ EXPOSE 443
EXPOSE 2222
VOLUME /data
ARG BUDIBASE_VERSION
# Ensuring the version argument is sent
RUN test -n "$BUDIBASE_VERSION"
ENV BUDIBASE_VERSION=$BUDIBASE_VERSION
# Remove cached files
RUN rm -rf \
/root/.cache \
/root/.npm \
/root/.pip \
/usr/local/share/doc \
/usr/share/doc \
/usr/share/man \
/var/lib/apt/lists/* \
/tmp/*
HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh"

View File

@ -1,131 +0,0 @@
FROM node:18-slim as build
# install node-gyp dependencies
RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 jq
# copy and install dependencies
WORKDIR /app
COPY package.json .
COPY yarn.lock .
COPY lerna.json .
COPY .yarnrc .
COPY packages/server/package.json packages/server/package.json
COPY packages/worker/package.json packages/worker/package.json
# string-templates does not get bundled during the esbuild process, so we want to use the local version
COPY packages/string-templates/package.json packages/string-templates/package.json
COPY scripts/removeWorkspaceDependencies.sh scripts/removeWorkspaceDependencies.sh
RUN chmod +x ./scripts/removeWorkspaceDependencies.sh
RUN ./scripts/removeWorkspaceDependencies.sh packages/server/package.json
RUN ./scripts/removeWorkspaceDependencies.sh packages/worker/package.json
# We will never want to sync pro, but the script is still required
RUN echo '' > scripts/syncProPackage.js
RUN jq 'del(.scripts.postinstall)' package.json > temp.json && mv temp.json package.json
RUN ./scripts/removeWorkspaceDependencies.sh package.json
RUN --mount=type=cache,target=/root/.yarn YARN_CACHE_FOLDER=/root/.yarn yarn install --production
# copy the actual code
COPY packages/server/dist packages/server/dist
COPY packages/server/pm2.config.js packages/server/pm2.config.js
COPY packages/server/client packages/server/client
COPY packages/server/builder packages/server/builder
COPY packages/worker/dist packages/worker/dist
COPY packages/worker/pm2.config.js packages/worker/pm2.config.js
COPY packages/string-templates packages/string-templates
FROM budibase/couchdb as runner
ARG TARGETARCH
ENV TARGETARCH $TARGETARCH
ENV NODE_MAJOR 18
#TARGETBUILD can be set to single (for single docker image) or aas (for azure app service)
# e.g. docker build --build-arg TARGETBUILD=aas ....
ARG TARGETBUILD=single
ENV TARGETBUILD $TARGETBUILD
# install base dependencies
RUN apt-get update && \
apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server libaio1
# Install postgres client for pg_dump utils
RUN apt install -y software-properties-common apt-transport-https ca-certificates gnupg \
&& curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \
&& echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \
&& apt update -y \
&& apt install postgresql-client-15 -y \
&& apt remove software-properties-common apt-transport-https gpg -y
# install other dependencies, nodejs, oracle requirements, jdk8, redis, nginx
WORKDIR /nodejs
COPY scripts/install-node.sh ./install.sh
RUN chmod +x install.sh && ./install.sh
# setup nginx
COPY hosting/single/nginx/nginx.conf /etc/nginx
COPY hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default
RUN mkdir -p /var/log/nginx && \
touch /var/log/nginx/error.log && \
touch /var/run/nginx.pid && \
usermod -a -G tty www-data
WORKDIR /
RUN mkdir -p scripts/integrations/oracle
COPY packages/server/scripts/integrations/oracle scripts/integrations/oracle
RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh
# setup minio
WORKDIR /minio
COPY scripts/install-minio.sh ./install.sh
RUN chmod +x install.sh && ./install.sh
# setup runner file
WORKDIR /
COPY hosting/single/runner.sh .
RUN chmod +x ./runner.sh
COPY hosting/single/healthcheck.sh .
RUN chmod +x ./healthcheck.sh
# Script below sets the path for storing data based on $DATA_DIR
# For Azure App Service install SSH & point data locations to /home
COPY hosting/single/ssh/sshd_config /etc/
COPY hosting/single/ssh/ssh_setup.sh /tmp
RUN /build-target-paths.sh
# setup letsencrypt certificate
RUN apt-get install -y certbot python3-certbot-nginx
COPY hosting/letsencrypt /app/letsencrypt
RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh
COPY --from=build /app/node_modules /node_modules
COPY --from=build /app/package.json /package.json
COPY --from=build /app/packages/server /app
COPY --from=build /app/packages/worker /worker
COPY --from=build /app/packages/string-templates /string-templates
RUN cd /string-templates && yarn link && cd ../app && yarn link @budibase/string-templates && cd ../worker && yarn link @budibase/string-templates
EXPOSE 80
EXPOSE 443
# Expose port 2222 for SSH on Azure App Service build
EXPOSE 2222
VOLUME /data
ARG BUDIBASE_VERSION
# Ensuring the version argument is sent
RUN test -n "$BUDIBASE_VERSION"
ENV BUDIBASE_VERSION=$BUDIBASE_VERSION
HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh"
# must set this just before running
ENV NODE_ENV=production
WORKDIR /
CMD ["./runner.sh"]

View File

@ -25,7 +25,7 @@ if [[ $(curl -s -w "%{http_code}\n" http://localhost:4002/health -o /dev/null) -
healthy=false
fi
if [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/_up -o /dev/null) -ne 200 ]]; then
echo 'ERROR: CouchDB is not running';
healthy=false
fi

View File

@ -7,7 +7,7 @@ declare -a DOCKER_VARS=("APP_PORT" "APPS_URL" "ARCHITECTURE" "BUDIBASE_ENVIRONME
[[ -z "${BUDIBASE_ENVIRONMENT}" ]] && export BUDIBASE_ENVIRONMENT=PRODUCTION [[ -z "${BUDIBASE_ENVIRONMENT}" ]] && export BUDIBASE_ENVIRONMENT=PRODUCTION
[[ -z "${CLUSTER_PORT}" ]] && export CLUSTER_PORT=80 [[ -z "${CLUSTER_PORT}" ]] && export CLUSTER_PORT=80
[[ -z "${DEPLOYMENT_ENVIRONMENT}" ]] && export DEPLOYMENT_ENVIRONMENT=docker [[ -z "${DEPLOYMENT_ENVIRONMENT}" ]] && export DEPLOYMENT_ENVIRONMENT=docker
[[ -z "${MINIO_URL}" ]] && export MINIO_URL=http://127.0.0.1:9000 [[ -z "${MINIO_URL}" ]] && [[ -z "${USE_S3}" ]] && export MINIO_URL=http://127.0.0.1:9000
[[ -z "${NODE_ENV}" ]] && export NODE_ENV=production [[ -z "${NODE_ENV}" ]] && export NODE_ENV=production
[[ -z "${POSTHOG_TOKEN}" ]] && export POSTHOG_TOKEN=phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU [[ -z "${POSTHOG_TOKEN}" ]] && export POSTHOG_TOKEN=phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU
[[ -z "${TENANT_FEATURE_FLAGS}" ]] && export TENANT_FEATURE_FLAGS="*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR" [[ -z "${TENANT_FEATURE_FLAGS}" ]] && export TENANT_FEATURE_FLAGS="*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"
@ -22,11 +22,11 @@ declare -a DOCKER_VARS=("APP_PORT" "APPS_URL" "ARCHITECTURE" "BUDIBASE_ENVIRONME
# Azure App Service customisations
if [[ "${TARGETBUILD}" = "aas" ]]; then
export DATA_DIR="${DATA_DIR:-/home}"
WEBSITES_ENABLE_APP_SERVICE_STORAGE=true
/etc/init.d/ssh start
else
export DATA_DIR=${DATA_DIR:-/data}
fi
mkdir -p ${DATA_DIR}
# Mount NFS or GCP Filestore if env vars exist for it
@ -77,7 +77,12 @@ mkdir -p ${DATA_DIR}/minio
chown -R couchdb:couchdb ${DATA_DIR}/couch
redis-server --requirepass $REDIS_PASSWORD > /dev/stdout 2>&1 &
/bbcouch-runner.sh &
/minio/minio server --console-address ":9001" ${DATA_DIR}/minio > /dev/stdout 2>&1 &
# only start minio if use s3 isn't passed
if [[ -z "${USE_S3}" ]]; then
/minio/minio server --console-address ":9001" ${DATA_DIR}/minio > /dev/stdout 2>&1 &
fi
/etc/init.d/nginx restart
if [[ ! -z "${CUSTOM_DOMAIN}" ]]; then
# Add monthly cron job to renew certbot certificate

View File

@ -1,10 +1,13 @@
{
"version": "2.14.7",
"npmClient": "yarn",
"packages": [
"packages/*",
"!packages/account-portal",
"packages/account-portal/packages/*"
],
"useNx": true,
"concurrency": 20,
"command": {
"publish": {
"ignoreChanges": [

View File

@ -2,27 +2,30 @@
"name": "root", "name": "root",
"private": true, "private": true,
"devDependencies": { "devDependencies": {
"@babel/core": "^7.22.5",
"@babel/eslint-parser": "^7.22.5",
"@babel/preset-env": "^7.22.5",
"@esbuild-plugins/tsconfig-paths": "^0.1.2", "@esbuild-plugins/tsconfig-paths": "^0.1.2",
"@typescript-eslint/parser": "6.7.2", "@types/node": "20.10.0",
"@typescript-eslint/parser": "6.9.0",
"esbuild": "^0.18.17", "esbuild": "^0.18.17",
"esbuild-node-externals": "^1.8.0", "esbuild-node-externals": "^1.8.0",
"eslint": "^8.44.0", "eslint": "^8.52.0",
"eslint-plugin-import": "^2.29.0",
"eslint-plugin-local-rules": "^2.0.0",
"eslint-plugin-svelte": "^2.34.0",
"husky": "^8.0.3", "husky": "^8.0.3",
"kill-port": "^1.6.1", "kill-port": "^1.6.1",
"lerna": "7.1.1", "lerna": "7.1.1",
"madge": "^6.0.0", "madge": "^6.0.0",
"minimist": "^1.2.8",
"nx": "16.4.3", "nx": "16.4.3",
"nx-cloud": "16.0.5", "nx-cloud": "16.0.5",
"prettier": "2.8.8", "prettier": "2.8.8",
"prettier-plugin-svelte": "^2.3.0", "prettier-plugin-svelte": "^2.3.0",
"svelte": "3.49.0", "svelte": "3.49.0",
"svelte-eslint-parser": "^0.33.1",
"typescript": "5.2.2", "typescript": "5.2.2",
"@babel/core": "^7.22.5", "yargs": "^17.7.2"
"@babel/eslint-parser": "^7.22.5",
"@babel/preset-env": "^7.22.5",
"eslint-plugin-svelte": "^2.32.2",
"svelte-eslint-parser": "^0.32.0"
}, },
"scripts": { "scripts": {
"preinstall": "node scripts/syncProPackage.js", "preinstall": "node scripts/syncProPackage.js",
@ -37,13 +40,16 @@
"nuke": "yarn run nuke:packages && yarn run nuke:docker", "nuke": "yarn run nuke:packages && yarn run nuke:docker",
"nuke:packages": "yarn run restore", "nuke:packages": "yarn run restore",
"nuke:docker": "lerna run --stream dev:stack:nuke", "nuke:docker": "lerna run --stream dev:stack:nuke",
"clean": "lerna clean -y", "clean": "lerna clean -y && echo Cleaning top level node modules 🧹 && rm -rf ./node_modules && echo Done! 🚀",
"kill-builder": "kill-port 3000", "kill-builder": "kill-port 3000",
"kill-server": "kill-port 4001 4002", "kill-server": "kill-port 4001 4002",
"kill-all": "yarn run kill-builder && yarn run kill-server", "kill-accountportal": "kill-port 3001 4003",
"dev": "yarn run kill-all && lerna run --parallel prebuild && lerna run --stream dev:builder", "kill-all": "yarn run kill-builder && yarn run kill-server && yarn kill-accountportal",
"dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up && lerna run --stream dev:builder --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker", "dev": "yarn run kill-all && lerna run --parallel prebuild && lerna run --stream dev --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server",
"dev:server": "yarn run kill-server && lerna run --stream dev:builder --scope @budibase/worker --scope @budibase/server", "dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up && lerna run --stream dev --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server",
"dev:server": "yarn run kill-server && lerna run --stream dev --scope @budibase/worker --scope @budibase/server",
"dev:accountportal": "yarn kill-accountportal && lerna run dev --stream --scope @budibase/account-portal-ui --scope @budibase/account-portal-server",
"dev:all": "yarn run kill-all && lerna run --stream dev",
"dev:built": "yarn run kill-all && cd packages/server && yarn dev:stack:up && cd ../../ && lerna run --stream dev:built", "dev:built": "yarn run kill-all && cd packages/server && yarn dev:stack:up && cd ../../ && lerna run --stream dev:built",
"dev:docker": "yarn build --scope @budibase/server --scope @budibase/worker && docker-compose -f hosting/docker-compose.build.yaml -f hosting/docker-compose.dev.yaml --env-file hosting/.env up --build --scale proxy-service=0", "dev:docker": "yarn build --scope @budibase/server --scope @budibase/worker && docker-compose -f hosting/docker-compose.build.yaml -f hosting/docker-compose.dev.yaml --env-file hosting/.env up --build --scale proxy-service=0",
"test": "lerna run --stream test --stream", "test": "lerna run --stream test --stream",
@ -77,11 +83,14 @@
"security:audit": "node scripts/audit.js", "security:audit": "node scripts/audit.js",
"postinstall": "husky install", "postinstall": "husky install",
"submodules:load": "git submodule init && git submodule update && yarn", "submodules:load": "git submodule init && git submodule update && yarn",
"submodules:unload": "git submodule deinit --all && yarn" "submodules:unload": "git submodule deinit --all && yarn",
"add-app-migration": "node scripts/add-app-migration.js --title"
}, },
"workspaces": { "workspaces": {
"packages": [ "packages": [
"packages/*" "packages/*",
"!packages/account-portal",
"packages/account-portal/packages/*"
] ]
}, },
"resolutions": { "resolutions": {
@ -91,7 +100,7 @@
"@budibase/types": "0.0.0" "@budibase/types": "0.0.0"
}, },
"engines": { "engines": {
"node": ">=18.0.0 <19.0.0" "node": ">=20.0.0 <21.0.0"
}, },
"dependencies": {} "dependencies": {}
} }

@ -0,0 +1 @@
Subproject commit b23fb3b17961fb04badd9487913a683fcf26dbe6

View File

@ -21,7 +21,7 @@
"test:watch": "jest --watchAll" "test:watch": "jest --watchAll"
}, },
"dependencies": { "dependencies": {
"@budibase/nano": "10.1.3", "@budibase/nano": "10.1.4",
"@budibase/pouchdb-replication-stream": "1.2.10", "@budibase/pouchdb-replication-stream": "1.2.10",
"@budibase/shared-core": "0.0.0", "@budibase/shared-core": "0.0.0",
"@budibase/types": "0.0.0", "@budibase/types": "0.0.0",
@ -32,6 +32,7 @@
"bcryptjs": "2.4.3", "bcryptjs": "2.4.3",
"bull": "4.10.1", "bull": "4.10.1",
"correlation-id": "4.0.0", "correlation-id": "4.0.0",
"dd-trace": "3.13.2",
"dotenv": "16.0.1", "dotenv": "16.0.1",
"ioredis": "5.3.2", "ioredis": "5.3.2",
"joi": "17.6.0", "joi": "17.6.0",
@ -64,7 +65,6 @@
"@types/cookies": "0.7.8", "@types/cookies": "0.7.8",
"@types/jest": "29.5.5", "@types/jest": "29.5.5",
"@types/lodash": "4.14.200", "@types/lodash": "4.14.200",
"@types/node": "18.17.0",
"@types/node-fetch": "2.6.4", "@types/node-fetch": "2.6.4",
"@types/pouchdb": "6.4.0", "@types/pouchdb": "6.4.0",
"@types/redlock": "4.0.3", "@types/redlock": "4.0.3",
@ -72,9 +72,9 @@
"@types/tar-fs": "2.0.1", "@types/tar-fs": "2.0.1",
"@types/uuid": "8.3.4", "@types/uuid": "8.3.4",
"chance": "1.1.8", "chance": "1.1.8",
"ioredis-mock": "8.7.0", "ioredis-mock": "8.9.0",
"jest": "29.6.2", "jest": "29.7.0",
"jest-environment-node": "29.6.2", "jest-environment-node": "29.7.0",
"jest-serial-runner": "1.2.1", "jest-serial-runner": "1.2.1",
"pino-pretty": "10.0.0", "pino-pretty": "10.0.0",
"pouchdb-adapter-memory": "7.2.2", "pouchdb-adapter-memory": "7.2.2",

View File

@ -1,5 +1,6 @@
const _passport = require("koa-passport")
const LocalStrategy = require("passport-local").Strategy
import { getGlobalDB } from "../context"
import { Cookie } from "../constants"
import { getSessionsForUser, invalidateSessions } from "../security/sessions"
@ -18,6 +19,7 @@ import {
GoogleInnerConfig,
OIDCInnerConfig,
PlatformLogoutOpts,
SessionCookie,
SSOProviderType,
} from "@budibase/types"
import * as events from "../events"
@ -26,6 +28,7 @@ import { clearCookie, getCookie } from "../utils"
import { ssoSaveUserNoOp } from "../middleware/passport/sso/sso"
const refresh = require("passport-oauth2-refresh")
export {
auditLog,
authError,
@ -42,7 +45,6 @@ export const buildAuthMiddleware = authenticated
export const buildTenancyMiddleware = tenancy
export const buildCsrfMiddleware = csrf
export const passport = _passport
export const jwt = require("jsonwebtoken")
// Strategies
_passport.use(new LocalStrategy(local.options, local.authenticate))
@ -189,10 +191,10 @@ export async function platformLogout(opts: PlatformLogoutOpts) {
if (!ctx) throw new Error("Koa context must be supplied to logout.")
const currentSession = getCookie<SessionCookie>(ctx, Cookie.Auth)
let sessions = await getSessionsForUser(userId)
if (currentSession && keepActiveSession) {
sessions = sessions.filter(
session => session.sessionId !== currentSession.sessionId
)

View File

@ -19,7 +19,7 @@ async function populateFromDB(appId: string) {
return doWithDB(
appId,
(db: Database) => {
return db.get<App>(DocumentType.APP_METADATA)
},
{ skip_setup: true }
)

View File

@ -1,6 +1,6 @@
import BaseCache from "./base"
const GENERIC = new BaseCache()
export enum CacheKey {
CHECKLIST = "checklist",
@ -18,13 +18,15 @@ export enum TTL {
ONE_DAY = 86400,
}
export const keys = (...args: Parameters<typeof GENERIC.keys>) =>
GENERIC.keys(...args)
export const get = (...args: Parameters<typeof GENERIC.get>) =>
GENERIC.get(...args)
export const store = (...args: Parameters<typeof GENERIC.store>) =>
GENERIC.store(...args)
export const destroy = (...args: Parameters<typeof GENERIC.delete>) =>
GENERIC.delete(...args)
export const withCache = (...args: Parameters<typeof GENERIC.withCache>) =>
GENERIC.withCache(...args)
export const bustCache = (...args: Parameters<typeof GENERIC.bustCache>) =>
GENERIC.bustCache(...args)

View File

@ -2,4 +2,6 @@ export * as generic from "./generic"
export * as user from "./user"
export * as app from "./appMetadata"
export * as writethrough from "./writethrough"
export * as invite from "./invite"
export * as passwordReset from "./passwordReset"
export * from "./generic"

View File

@@ -0,0 +1,86 @@
import * as utils from "../utils"
import { Duration, DurationType } from "../utils"
import env from "../environment"
import { getTenantId } from "../context"
import * as redis from "../redis/init"
const TTL_SECONDS = Duration.fromDays(7).toSeconds()
interface Invite {
email: string
info: any
}
interface InviteWithCode extends Invite {
code: string
}
/**
* Given an invite code and invite body, allow the update of an existing/valid invite in redis
* @param code The invite code for an invite in redis
* @param value The body of the updated user invitation
*/
export async function updateCode(code: string, value: Invite) {
const client = await redis.getInviteClient()
await client.store(code, value, TTL_SECONDS)
}
/**
* Generates an invitation code and writes it to redis - which can later be checked for user creation.
* @param email the email address which the code is being sent to (for use later).
* @param info Information to be carried along with the invitation.
* @return returns the code that was stored to redis.
*/
export async function createCode(email: string, info: any): Promise<string> {
const code = utils.newid()
const client = await redis.getInviteClient()
await client.store(code, { email, info }, TTL_SECONDS)
return code
}
/**
* Checks that the provided invite code is valid - will return the email address of the user that was invited.
* @param code the invite code that was provided as part of the link.
* @return If the code is valid then an email address will be returned.
*/
export async function getCode(code: string): Promise<Invite> {
const client = await redis.getInviteClient()
const value = (await client.get(code)) as Invite | undefined
if (!value) {
throw "Invitation is not valid or has expired, please request a new one."
}
return value
}
export async function deleteCode(code: string) {
const client = await redis.getInviteClient()
await client.delete(code)
}
/**
Get all currently available user invitations for the current tenant.
**/
export async function getInviteCodes(): Promise<InviteWithCode[]> {
const client = await redis.getInviteClient()
const invites: { key: string; value: Invite }[] = await client.scan()
const results: InviteWithCode[] = invites.map(invite => {
return {
...invite.value,
code: invite.key,
}
})
if (!env.MULTI_TENANCY) {
return results
}
const tenantId = getTenantId()
return results.filter(invite => tenantId === invite.info.tenantId)
}
export async function getExistingInvites(
emails: string[]
): Promise<InviteWithCode[]> {
return (await getInviteCodes()).filter(invite =>
emails.includes(invite.email)
)
}
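Taken together, the new module gives invites a create/read/update/delete lifecycle keyed by a random code with a seven-day TTL. A hedged sketch of the intended flow (the relative import path and the shape of info are assumptions):

// Illustrative sketch, not part of this commit.
import * as invite from "./invite"

export async function inviteThenAccept(email: string, tenantId: string) {
  const code = await invite.createCode(email, { tenantId })
  // later, when the invite link is followed:
  const { email: invitedEmail, info } = await invite.getCode(code)
  // ...create the user here, then burn the code
  await invite.deleteCode(code)
  return { invitedEmail, info }
}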

View File

@@ -0,0 +1,49 @@
import * as redis from "../redis/init"
import * as utils from "../utils"
import { Duration } from "../utils"
const TTL_SECONDS = Duration.fromHours(1).toSeconds()
interface PasswordReset {
userId: string
info: any
}
/**
* Given a user ID this will store a code (that is returned) for an hour in redis.
* The user can then return this code for resetting their password (through their reset link).
* @param userId the ID of the user which is to be reset.
* @param info Info about the user/the reset process.
* @return returns the code that was stored to redis.
*/
export async function createCode(userId: string, info: any): Promise<string> {
const code = utils.newid()
const client = await redis.getPasswordResetClient()
await client.store(code, { userId, info }, TTL_SECONDS)
return code
}
/**
* Given a reset code this will look it up in redis and check that the code is valid.
* @param code The code provided via the email link.
* @return returns the user ID if it is found
*/
export async function getCode(code: string): Promise<PasswordReset> {
const client = await redis.getPasswordResetClient()
const value = (await client.get(code)) as PasswordReset | undefined
if (!value) {
throw new Error(
"Provided information is not valid, cannot reset password - please try again."
)
}
return value
}
/**
* Given a reset code this will invalidate it.
* @param code The code provided via the email link.
*/
export async function invalidateCode(code: string): Promise<void> {
const client = await redis.getPasswordResetClient()
await client.delete(code)
}
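The password-reset cache follows the same pattern with a one-hour TTL. A sketch of the expected round trip (import path assumed; error handling omitted):

// Illustrative sketch, not part of this commit.
import * as passwordReset from "./passwordReset"

export async function resetRoundTrip(userId: string) {
  const code = await passwordReset.createCode(userId, {})
  // the code is emailed to the user; when they come back with it:
  const { userId: resetUserId } = await passwordReset.getCode(code)
  await passwordReset.invalidateCode(code) // single use
  return resetUserId
}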

View File

@@ -1,15 +1,16 @@
import { DBTestConfiguration } from "../../../tests/extra"
- import {
- structures,
- expectFunctionWasCalledTimesWith,
- mocks,
- } from "../../../tests"
+ import { structures } from "../../../tests"
import { Writethrough } from "../writethrough"
import { getDB } from "../../db"
+ import { Document } from "@budibase/types"
import tk from "timekeeper"
tk.freeze(Date.now())
+ interface ValueDoc extends Document {
+ value: any
+ }
const DELAY = 5000
describe("writethrough", () => {
@@ -117,7 +118,7 @@ describe("writethrough", () => {
describe("get", () => {
it("should be able to retrieve", async () => {
await config.doInTenant(async () => {
- const response = await writethrough.get(docId)
+ const response = await writethrough.get<ValueDoc>(docId)
expect(response.value).toBe(4)
})
})

View File

@@ -7,7 +7,7 @@ import * as locks from "../redis/redlockImpl"
const DEFAULT_WRITE_RATE_MS = 10000
let CACHE: BaseCache | null = null
- interface CacheItem {
+ interface CacheItem<T extends Document> {
doc: any
lastWrite: number
}
@@ -24,7 +24,10 @@ function makeCacheKey(db: Database, key: string) {
return db.name + key
}
- function makeCacheItem(doc: any, lastWrite: number | null = null): CacheItem {
+ function makeCacheItem<T extends Document>(
+ doc: T,
+ lastWrite: number | null = null
+ ): CacheItem<T> {
return { doc, lastWrite: lastWrite || Date.now() }
}
@@ -35,7 +38,7 @@ async function put(
) {
const cache = await getCache()
const key = doc._id
- let cacheItem: CacheItem | undefined
+ let cacheItem: CacheItem<any> | undefined
if (key) {
cacheItem = await cache.get(makeCacheKey(db, key))
}
@@ -53,11 +56,8 @@ async function put(
const writeDb = async (toWrite: any) => {
// doc should contain the _id and _rev
const response = await db.put(toWrite, { force: true })
- output = {
- ...doc,
- _id: response.id,
- _rev: response.rev,
- }
+ output._id = response.id
+ output._rev = response.rev
}
try {
await writeDb(doc)
@@ -84,12 +84,12 @@ async function put(
return { ok: true, id: output._id, rev: output._rev }
}
- async function get(db: Database, id: string): Promise<any> {
+ async function get<T extends Document>(db: Database, id: string): Promise<T> {
const cache = await getCache()
const cacheKey = makeCacheKey(db, id)
- let cacheItem: CacheItem = await cache.get(cacheKey)
+ let cacheItem: CacheItem<T> = await cache.get(cacheKey)
if (!cacheItem) {
- const doc = await db.get(id)
+ const doc = await db.get<T>(id)
cacheItem = makeCacheItem(doc)
await cache.store(cacheKey, cacheItem)
}
@@ -123,8 +123,8 @@ export class Writethrough {
return put(this.db, doc, writeRateMs)
}
- async get(id: string) {
- return get(this.db, id)
+ async get<T extends Document>(id: string) {
+ return get<T>(this.db, id)
}
async remove(docOrId: any, rev?: any) {
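With the generic parameter threaded through get, callers of Writethrough can type the cached document instead of working with any. A sketch, assuming a document type that extends Document and that the constructor takes a database plus an optional write rate:

// Illustrative sketch, not part of this commit.
import { Document } from "@budibase/types"
import { getDB } from "../db"
import { Writethrough } from "./writethrough"

interface CounterDoc extends Document {
  value: number
}

export async function readCounter(docId: string) {
  const writethrough = new Writethrough(getDB("example-db"))
  const doc = await writethrough.get<CounterDoc>(docId)
  return doc.value // typed as number rather than any
}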

View File

@@ -17,7 +17,6 @@ import { DocumentType, SEPARATOR } from "../constants"
import { CacheKey, TTL, withCache } from "../cache"
import * as context from "../context"
import env from "../environment"
- import environment from "../environment"
// UTILS
@@ -181,10 +180,10 @@ export async function getGoogleDatasourceConfig(): Promise<
}
export function getDefaultGoogleConfig(): GoogleInnerConfig | undefined {
- if (environment.GOOGLE_CLIENT_ID && environment.GOOGLE_CLIENT_SECRET) {
+ if (env.GOOGLE_CLIENT_ID && env.GOOGLE_CLIENT_SECRET) {
return {
- clientID: environment.GOOGLE_CLIENT_ID!,
- clientSecret: environment.GOOGLE_CLIENT_SECRET!,
+ clientID: env.GOOGLE_CLIENT_ID!,
+ clientSecret: env.GOOGLE_CLIENT_SECRET!,
activated: true,
}
}

View File

@@ -1,4 +1,5 @@
import { prefixed, DocumentType } from "@budibase/types"
export {
SEPARATOR,
UNICODE_MAX,
@@ -28,7 +29,7 @@ export enum ViewName {
APP_BACKUP_BY_TRIGGER = "by_trigger",
}
- export const DeprecatedViews = {
+ export const DeprecatedViews: Record<string, string[]> = {
[ViewName.USER_BY_EMAIL]: [
// removed due to inaccuracy in view doc filter logic
"by_email",

View File

@@ -11,24 +11,7 @@ export enum Cookie {
OIDC_CONFIG = "budibase:oidc:config",
}
- export enum Header {
- API_KEY = "x-budibase-api-key",
- LICENSE_KEY = "x-budibase-license-key",
- API_VER = "x-budibase-api-version",
- APP_ID = "x-budibase-app-id",
- SESSION_ID = "x-budibase-session-id",
- TYPE = "x-budibase-type",
- PREVIEW_ROLE = "x-budibase-role",
- TENANT_ID = "x-budibase-tenant-id",
- VERIFICATION_CODE = "x-budibase-verification-code",
- RETURN_VERIFICATION_CODE = "x-budibase-return-verification-code",
- RESET_PASSWORD_CODE = "x-budibase-reset-password-code",
- RETURN_RESET_PASSWORD_CODE = "x-budibase-return-reset-password-code",
- TOKEN = "x-budibase-token",
- CSRF_TOKEN = "x-csrf-token",
- CORRELATION_ID = "x-budibase-correlation-id",
- AUTHORIZATION = "authorization",
- }
+ export { Header } from "@budibase/shared-core"
export enum GlobalRole {
OWNER = "owner",

View File

@@ -4,7 +4,7 @@ import { ContextMap } from "./types"
export default class Context {
static storage = new AsyncLocalStorage<ContextMap>()
- static run(context: ContextMap, func: any) {
+ static run<T>(context: ContextMap, func: () => T) {
return Context.storage.run(context, () => func())
}

View File

@@ -98,17 +98,19 @@ function updateContext(updates: ContextMap): ContextMap {
return context
}
- async function newContext(updates: ContextMap, task: any) {
+ async function newContext<T>(updates: ContextMap, task: () => T) {
+ guardMigration()
// see if there already is a context setup
let context: ContextMap = updateContext(updates)
return Context.run(context, task)
}
- export async function doInAutomationContext(params: {
+ export async function doInAutomationContext<T>(params: {
appId: string
automationId: string
- task: any
- }): Promise<any> {
+ task: () => T
+ }): Promise<T> {
const tenantId = getTenantIDFromAppID(params.appId)
return newContext(
{
@@ -132,7 +134,7 @@ export async function doInContext(appId: string, task: any): Promise<any> {
}
export async function doInTenant<T>(
- tenantId: string | null,
+ tenantId: string | undefined,
task: () => T
): Promise<T> {
// make sure default always selected in single tenancy
@@ -144,31 +146,35 @@ export async function doInTenant<T>(
return newContext(updates, task)
}
- export async function doInAppContext(
- appId: string | null,
- task: any
- ): Promise<any> {
- if (!appId && !env.isTest()) {
+ export async function doInAppContext<T>(
+ appId: string,
+ task: () => T
+ ): Promise<T> {
+ return _doInAppContext(appId, task)
+ }
+ async function _doInAppContext<T>(
+ appId: string,
+ task: () => T,
+ extraContextSettings?: ContextMap
+ ): Promise<T> {
+ if (!appId) {
throw new Error("appId is required")
}
- let updates: ContextMap
- if (!appId) {
- updates = { appId: "" }
- } else {
const tenantId = getTenantIDFromAppID(appId)
- updates = { appId }
+ const updates: ContextMap = { appId, ...extraContextSettings }
if (tenantId) {
updates.tenantId = tenantId
}
- }
return newContext(updates, task)
}
- export async function doInIdentityContext(
+ export async function doInIdentityContext<T>(
identity: IdentityContext,
- task: any
- ): Promise<any> {
+ task: () => T
+ ): Promise<T> {
if (!identity) {
throw new Error("identity is required")
}
@@ -182,6 +188,24 @@ export async function doInIdentityContext(
return newContext(context, task)
}
+ function guardMigration() {
+ const context = Context.get()
+ if (context?.isMigrating) {
+ throw new Error(
+ "The context cannot be changed, a migration is currently running"
+ )
+ }
+ }
+ export async function doInAppMigrationContext<T>(
+ appId: string,
+ task: () => T
+ ): Promise<T> {
+ return _doInAppContext(appId, task, {
+ isMigrating: true,
+ })
+ }
export function getIdentity(): IdentityContext | undefined {
try {
const context = Context.get()
@@ -276,6 +300,9 @@ export function getAuditLogsDB(): Database {
*/
export function getAppDB(opts?: any): Database {
const appId = getAppId()
+ if (!appId) {
+ throw new Error("Unable to retrieve app DB - no app ID.")
+ }
return getDB(appId, opts)
}
@@ -308,3 +335,11 @@ export function isScim(): boolean {
const scimCall = context?.isScim
return !!scimCall
}
+ export function getCurrentContext(): ContextMap | undefined {
+ try {
+ return Context.get()
+ } catch (e) {
+ return undefined
+ }
+ }
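doInAppMigrationContext marks the context with isMigrating, and guardMigration (called at the top of newContext) then rejects any attempt to enter another context while the migration runs, which is what the tests below exercise. A sketch of the intended call pattern (the migration body is hypothetical; the package-level context export is assumed):

// Illustrative sketch, not part of this commit.
import { context } from "@budibase/backend-core"

export async function runAppMigration(appId: string) {
  await context.doInAppMigrationContext(appId, async () => {
    const db = context.getAppDB()
    // ...perform migration writes against db here
    // calling e.g. context.doInAppContext(appId, ...) inside this block would
    // throw "The context cannot be changed, a migration is currently running"
  })
}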

View File

@@ -1,6 +1,11 @@
import { testEnv } from "../../../tests/extra"
import * as context from "../"
import { DEFAULT_TENANT_ID } from "../../constants"
+ import { structures } from "../../../tests"
+ import { db } from "../.."
+ import Context from "../Context"
+ import { ContextMap } from "../types"
+ import { IdentityType } from "@budibase/types"
describe("context", () => {
describe("doInTenant", () => {
@@ -144,4 +149,107 @@ describe("context", () => {
expect(isScim).toBe(false)
})
})
describe("doInAppMigrationContext", () => {
it("the context is set correctly", async () => {
const appId = db.generateAppID()
await context.doInAppMigrationContext(appId, () => {
const context = Context.get()
const expected: ContextMap = {
appId,
isMigrating: true,
}
expect(context).toEqual(expected)
})
})
it("the context is set correctly when running in a tenant id", async () => {
const tenantId = structures.tenant.id()
const appId = db.generateAppID(tenantId)
await context.doInAppMigrationContext(appId, () => {
const context = Context.get()
const expected: ContextMap = {
appId,
isMigrating: true,
tenantId,
}
expect(context).toEqual(expected)
})
})
it("the context is not modified outside the delegate", async () => {
const appId = db.generateAppID()
expect(Context.get()).toBeUndefined()
await context.doInAppMigrationContext(appId, () => {
const context = Context.get()
const expected: ContextMap = {
appId,
isMigrating: true,
}
expect(context).toEqual(expected)
})
expect(Context.get()).toBeUndefined()
})
it.each([
[
"doInAppMigrationContext",
() => context.doInAppMigrationContext(db.generateAppID(), () => {}),
],
[
"doInAppContext",
() => context.doInAppContext(db.generateAppID(), () => {}),
],
[
"doInAutomationContext",
() =>
context.doInAutomationContext({
appId: db.generateAppID(),
automationId: structures.generator.guid(),
task: () => {},
}),
],
["doInContext", () => context.doInContext(db.generateAppID(), () => {})],
[
"doInEnvironmentContext",
() => context.doInEnvironmentContext({}, () => {}),
],
[
"doInIdentityContext",
() =>
context.doInIdentityContext(
{
account: undefined,
type: IdentityType.USER,
_id: structures.users.user()._id!,
},
() => {}
),
],
["doInScimContext", () => context.doInScimContext(() => {})],
[
"doInTenant",
() => context.doInTenant(structures.tenant.id(), () => {}),
],
])(
"a nested context.%s function cannot run",
async (_, otherContextCall: () => Promise<void>) => {
await expect(
context.doInAppMigrationContext(db.generateAppID(), async () => {
await otherContextCall()
})
).rejects.toThrowError(
"The context cannot be changed, a migration is currently running"
)
}
)
})
})

View File

@@ -1,4 +1,5 @@
import { IdentityContext } from "@budibase/types"
+ import { ExecutionTimeTracker } from "../timers"
// keep this out of Budibase types, don't want to expose context info
export type ContextMap = {
@@ -8,4 +9,6 @@ export type ContextMap = {
environmentVariables?: Record<string, string>
isScim?: boolean
automationId?: string
+ isMigrating?: boolean
+ jsExecutionTracker?: ExecutionTimeTracker
}

View File

@@ -10,12 +10,16 @@ import {
DatabaseDeleteIndexOpts,
Document,
isDocument,
+ RowResponse,
} from "@budibase/types"
import { getCouchInfo } from "./connections"
import { directCouchUrlCall } from "./utils"
import { getPouchDB } from "./pouchDB"
import { WriteStream, ReadStream } from "fs"
import { newid } from "../../docIds/newid"
+ import { DDInstrumentedDatabase } from "../instrumentation"
+ const DATABASE_NOT_FOUND = "Database does not exist."
function buildNano(couchInfo: { url: string; cookie: string }) {
return Nano({
@@ -29,15 +33,15 @@ function buildNano(couchInfo: { url: string; cookie: string }) {
})
}
+ type DBCall<T> = () => Promise<T>
export function DatabaseWithConnection(
dbName: string,
connection: string,
opts?: DatabaseOpts
) {
- if (!connection) {
- throw new Error("Must provide connection details")
- }
- return new DatabaseImpl(dbName, opts, connection)
+ const db = new DatabaseImpl(dbName, opts, connection)
+ return new DDInstrumentedDatabase(db)
}
export class DatabaseImpl implements Database {
@@ -48,10 +52,7 @@ export class DatabaseImpl implements Database {
private readonly couchInfo = getCouchInfo()
- constructor(dbName?: string, opts?: DatabaseOpts, connection?: string) {
+ constructor(dbName: string, opts?: DatabaseOpts, connection?: string) {
- if (dbName == null) {
- throw new Error("Database name cannot be undefined.")
- }
this.name = dbName
this.pouchOpts = opts || {}
if (connection) {
@@ -81,7 +82,11 @@ export class DatabaseImpl implements Database {
return this.instanceNano || DatabaseImpl.nano
}
- async checkSetup() {
+ private getDb() {
+ return this.nano().db.use(this.name)
+ }
+ private async checkAndCreateDb() {
let shouldCreate = !this.pouchOpts?.skip_setup
// check exists in a lightweight fashion
let exists = await this.exists()
@@ -98,30 +103,68 @@
}
}
}
- return this.nano().db.use(this.name)
+ return this.getDb()
}
- private async updateOutput(fnc: any) {
+ // this function fetches the DB and handles if DB creation is needed
+ private async performCall<T>(
+ call: (db: Nano.DocumentScope<any>) => Promise<DBCall<T>> | DBCall<T>
+ ): Promise<any> {
+ const db = this.getDb()
+ const fnc = await call(db)
try {
return await fnc()
} catch (err: any) {
- if (err.statusCode) {
+ if (err.statusCode === 404 && err.reason === DATABASE_NOT_FOUND) {
+ await this.checkAndCreateDb()
+ return await this.performCall(call)
+ } else if (err.statusCode) {
err.status = err.statusCode
}
throw err
}
}
- async get<T>(id?: string): Promise<T | any> {
- const db = await this.checkSetup()
+ async get<T extends Document>(id?: string): Promise<T> {
+ return this.performCall(db => {
if (!id) {
throw new Error("Unable to get doc without a valid _id.")
}
- return this.updateOutput(() => db.get(id))
+ return () => db.get(id)
+ })
}
+ async getMultiple<T extends Document>(
+ ids: string[],
+ opts?: { allowMissing?: boolean }
+ ): Promise<T[]> {
+ // get unique
+ ids = [...new Set(ids)]
+ const response = await this.allDocs<T>({
+ keys: ids,
+ include_docs: true,
+ })
+ const rowUnavailable = (row: RowResponse<T>) => {
+ // row is deleted - key lookup can return this
+ if (row.doc == null || ("deleted" in row.value && row.value.deleted)) {
+ return true
+ }
+ return row.error === "not_found"
+ }
+ const rows = response.rows.filter(row => !rowUnavailable(row))
+ const someMissing = rows.length !== response.rows.length
+ // some were filtered out - means some missing
+ if (!opts?.allowMissing && someMissing) {
+ const missing = response.rows.filter(row => rowUnavailable(row))
+ const missingIds = missing.map(row => row.key).join(", ")
+ throw new Error(`Unable to get documents: ${missingIds}`)
+ }
+ return rows.map(row => row.doc!)
+ }
async remove(idOrDoc: string | Document, rev?: string) {
- const db = await this.checkSetup()
+ return this.performCall(db => {
let _id: string
let _rev: string
@@ -136,7 +179,8 @@ export class DatabaseImpl implements Database {
if (!_id || !_rev) {
throw new Error("Unable to remove doc without a valid _id and _rev.")
}
- return this.updateOutput(() => db.destroy(_id, _rev))
+ return () => db.destroy(_id, _rev)
+ })
}
async post(document: AnyDocument, opts?: DatabasePutOpts) {
@@ -150,7 +194,7 @@
if (!document._id) {
throw new Error("Cannot store document without _id field.")
}
- const db = await this.checkSetup()
+ return this.performCall(async db => {
if (!document.createdAt) {
document.createdAt = new Date().toISOString()
}
@@ -167,26 +211,32 @@
}
}
}
- return this.updateOutput(() => db.insert(document))
+ return () => db.insert(document)
+ })
}
async bulkDocs(documents: AnyDocument[]) {
- const db = await this.checkSetup()
- return this.updateOutput(() => db.bulk({ docs: documents }))
+ return this.performCall(db => {
+ return () => db.bulk({ docs: documents })
+ })
}
- async allDocs<T>(params: DatabaseQueryOpts): Promise<AllDocsResponse<T>> {
- const db = await this.checkSetup()
- return this.updateOutput(() => db.list(params))
+ async allDocs<T extends Document>(
+ params: DatabaseQueryOpts
+ ): Promise<AllDocsResponse<T>> {
+ return this.performCall(db => {
+ return () => db.list(params)
+ })
}
- async query<T>(
+ async query<T extends Document>(
viewName: string,
params: DatabaseQueryOpts
): Promise<AllDocsResponse<T>> {
- const db = await this.checkSetup()
+ return this.performCall(db => {
const [database, view] = viewName.split("/")
- return this.updateOutput(() => db.view(database, view, params))
+ return () => db.view(database, view, params)
+ })
}
async destroy() {
@@ -203,8 +253,9 @@
}
async compact() {
- const db = await this.checkSetup()
- return this.updateOutput(() => db.compact())
+ return this.performCall(db => {
+ return () => db.compact()
+ })
}
// All below functions are in-frequently called, just utilise PouchDB
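Two behaviours of the rewritten class are worth calling out: performCall lazily creates the database when CouchDB answers 404 with "Database does not exist." and retries once, and the new getMultiple batches key lookups through _all_docs, throwing on missing IDs unless allowMissing is set. A usage sketch (document shape, DB name and import path are illustrative):

// Illustrative sketch, not part of this commit.
import { Document } from "@budibase/types"
import { getDB } from "../../db"

interface UserDoc extends Document {
  email: string
}

export async function fetchEmails(ids: string[]) {
  const db = getDB("app_dev_example")
  // missing or deleted IDs are silently dropped instead of throwing
  const users = await db.getMultiple<UserDoc>(ids, { allowMissing: true })
  return users.map(user => user.email)
}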

View File

@@ -1,11 +1,9 @@
- import env from "../environment"
import { directCouchQuery, DatabaseImpl } from "./couch"
- import { CouchFindOptions, Database } from "@budibase/types"
+ import { CouchFindOptions, Database, DatabaseOpts } from "@budibase/types"
+ import { DDInstrumentedDatabase } from "./instrumentation"
- const dbList = new Set()
- export function getDB(dbName?: string, opts?: any): Database {
- return new DatabaseImpl(dbName, opts)
+ export function getDB(dbName: string, opts?: DatabaseOpts): Database {
+ return new DDInstrumentedDatabase(new DatabaseImpl(dbName, opts))
}
// we have to use a callback for this so that we can close
@@ -14,7 +12,7 @@ export function getDB(dbName?: string, opts?: any): Database {
export async function doWithDB<T>(
dbName: string,
cb: (db: Database) => Promise<T>,
- opts = {}
+ opts?: DatabaseOpts
) {
const db = getDB(dbName, opts)
// need this to be async so that we can correctly close DB after all
@@ -22,13 +20,6 @@ export async function doWithDB<T>(
return await cb(db)
}
- export function allDbs() {
- if (!env.isTest()) {
- throw new Error("Cannot be used outside test environment.")
- }
- return [...dbList]
- }
export async function directCouchAllDbs(queryString?: string) {
let couchPath = "/_all_dbs"
if (queryString) {
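getDB now requires a name and always returns the Datadog-instrumented wrapper, while doWithDB keeps the callback form with typed options. A short sketch of the callback usage (DB name and option values are illustrative):

// Illustrative sketch, not part of this commit.
import { doWithDB } from "./"

export async function countDocs(dbName: string) {
  return doWithDB(
    dbName,
    async db => {
      const response = await db.allDocs({ include_docs: false })
      return response.rows.length
    },
    { skip_setup: true }
  )
}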

View File

@@ -0,0 +1,149 @@
import {
DocumentScope,
DocumentDestroyResponse,
DocumentInsertResponse,
DocumentBulkResponse,
OkResponse,
} from "@budibase/nano"
import {
AllDocsResponse,
AnyDocument,
Database,
DatabaseDumpOpts,
DatabasePutOpts,
DatabaseQueryOpts,
Document,
} from "@budibase/types"
import tracer from "dd-trace"
import { Writable } from "stream"
export class DDInstrumentedDatabase implements Database {
constructor(private readonly db: Database) {}
get name(): string {
return this.db.name
}
exists(): Promise<boolean> {
return tracer.trace("db.exists", span => {
span?.addTags({ db_name: this.name })
return this.db.exists()
})
}
get<T extends Document>(id?: string | undefined): Promise<T> {
return tracer.trace("db.get", span => {
span?.addTags({ db_name: this.name, doc_id: id })
return this.db.get(id)
})
}
getMultiple<T extends Document>(
ids: string[],
opts?: { allowMissing?: boolean | undefined } | undefined
): Promise<T[]> {
return tracer.trace("db.getMultiple", span => {
span?.addTags({
db_name: this.name,
num_docs: ids.length,
allow_missing: opts?.allowMissing,
})
return this.db.getMultiple(ids, opts)
})
}
remove(
id: string | Document,
rev?: string | undefined
): Promise<DocumentDestroyResponse> {
return tracer.trace("db.remove", span => {
span?.addTags({ db_name: this.name, doc_id: id })
return this.db.remove(id, rev)
})
}
put(
document: AnyDocument,
opts?: DatabasePutOpts | undefined
): Promise<DocumentInsertResponse> {
return tracer.trace("db.put", span => {
span?.addTags({ db_name: this.name, doc_id: document._id })
return this.db.put(document, opts)
})
}
bulkDocs(documents: AnyDocument[]): Promise<DocumentBulkResponse[]> {
return tracer.trace("db.bulkDocs", span => {
span?.addTags({ db_name: this.name, num_docs: documents.length })
return this.db.bulkDocs(documents)
})
}
allDocs<T extends Document>(
params: DatabaseQueryOpts
): Promise<AllDocsResponse<T>> {
return tracer.trace("db.allDocs", span => {
span?.addTags({ db_name: this.name })
return this.db.allDocs(params)
})
}
query<T extends Document>(
viewName: string,
params: DatabaseQueryOpts
): Promise<AllDocsResponse<T>> {
return tracer.trace("db.query", span => {
span?.addTags({ db_name: this.name, view_name: viewName })
return this.db.query(viewName, params)
})
}
destroy(): Promise<void | OkResponse> {
return tracer.trace("db.destroy", span => {
span?.addTags({ db_name: this.name })
return this.db.destroy()
})
}
compact(): Promise<void | OkResponse> {
return tracer.trace("db.compact", span => {
span?.addTags({ db_name: this.name })
return this.db.compact()
})
}
dump(stream: Writable, opts?: DatabaseDumpOpts | undefined): Promise<any> {
return tracer.trace("db.dump", span => {
span?.addTags({ db_name: this.name })
return this.db.dump(stream, opts)
})
}
load(...args: any[]): Promise<any> {
return tracer.trace("db.load", span => {
span?.addTags({ db_name: this.name })
return this.db.load(...args)
})
}
createIndex(...args: any[]): Promise<any> {
return tracer.trace("db.createIndex", span => {
span?.addTags({ db_name: this.name })
return this.db.createIndex(...args)
})
}
deleteIndex(...args: any[]): Promise<any> {
return tracer.trace("db.deleteIndex", span => {
span?.addTags({ db_name: this.name })
return this.db.deleteIndex(...args)
})
}
getIndexes(...args: any[]): Promise<any> {
return tracer.trace("db.getIndexes", span => {
span?.addTags({ db_name: this.name })
return this.db.getIndexes(...args)
})
}
}
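The wrapper is a plain decorator: every Database method is forwarded unchanged, wrapped in a dd-trace span named db.<method> and tagged with the database name. Callers normally get it via getDB, but wrapping by hand would look like this (import paths assumed):

// Illustrative sketch, not part of this commit.
import { DatabaseImpl } from "./couch"
import { DDInstrumentedDatabase } from "./instrumentation"

export async function tracedGet(id: string) {
  const db = new DDInstrumentedDatabase(new DatabaseImpl("example-db"))
  // recorded as a "db.get" span with db_name and doc_id tags, then delegated
  return db.get(id)
}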

View File

@@ -5,7 +5,6 @@ const { getDB } = require("../db")
describe("db", () => {
describe("getDB", () => {
it("returns a db", async () => {
const dbName = structures.db.id()
const db = getDB(dbName)
expect(db).toBeDefined()

View File

@@ -6,6 +6,7 @@ import { AppState, DeletedApp, getAppMetadata } from "../cache/appMetadata"
import { isDevApp, isDevAppID, getProdAppID } from "../docIds/conversions"
import { App, Database } from "@budibase/types"
import { getStartEndKeyURL } from "../docIds"
export * from "../docIds"
/**

View File

@@ -7,12 +7,19 @@ import {
} from "../constants"
import { getGlobalDB } from "../context"
import { doWithDB } from "./"
- import { AllDocsResponse, Database, DatabaseQueryOpts } from "@budibase/types"
+ import {
+ AllDocsResponse,
+ Database,
+ DatabaseQueryOpts,
+ Document,
+ DesignDocument,
+ DBView,
+ } from "@budibase/types"
import env from "../environment"
const DESIGN_DB = "_design/database"
- function DesignDoc() {
+ function DesignDoc(): DesignDocument {
return {
_id: DESIGN_DB,
// view collation information, read before writing any complex views:
@@ -21,20 +28,14 @@
}
}
- interface DesignDocument {
- views: any
- }
async function removeDeprecated(db: Database, viewName: ViewName) {
- // @ts-ignore
if (!DeprecatedViews[viewName]) {
return
}
try {
const designDoc = await db.get<DesignDocument>(DESIGN_DB)
- // @ts-ignore
for (let deprecatedNames of DeprecatedViews[viewName]) {
- delete designDoc.views[deprecatedNames]
+ delete designDoc.views?.[deprecatedNames]
}
await db.put(designDoc)
} catch (err) {
@@ -43,18 +44,18 @@ async function removeDeprecated(db: Database, viewName: ViewName) {
}
export async function createView(
- db: any,
+ db: Database,
viewJs: string,
viewName: string
): Promise<void> {
let designDoc
try {
- designDoc = (await db.get(DESIGN_DB)) as DesignDocument
+ designDoc = await db.get<DesignDocument>(DESIGN_DB)
} catch (err) {
// no design doc, make one
designDoc = DesignDoc()
}
- const view = {
+ const view: DBView = {
map: viewJs,
}
designDoc.views = {
@@ -109,7 +110,7 @@ export interface QueryViewOptions {
arrayResponse?: boolean
}
- export async function queryViewRaw<T>(
+ export async function queryViewRaw<T extends Document>(
viewName: ViewName,
params: DatabaseQueryOpts,
db: Database,
@@ -137,18 +138,16 @@ export async function queryViewRaw<T>(
}
}
- export const queryView = async <T>(
+ export const queryView = async <T extends Document>(
viewName: ViewName,
params: DatabaseQueryOpts,
db: Database,
createFunc: any,
opts?: QueryViewOptions
- ): Promise<T[] | T | undefined> => {
+ ): Promise<T[] | T> => {
const response = await queryViewRaw<T>(viewName, params, db, createFunc, opts)
const rows = response.rows
- const docs = rows.map((row: any) =>
- params.include_docs ? row.doc : row.value
- )
+ const docs = rows.map(row => (params.include_docs ? row.doc! : row.value))
// if arrayResponse has been requested, always return array regardless of length
if (opts?.arrayResponse) {
@@ -198,11 +197,11 @@ export const createPlatformUserView = async () => {
await createPlatformView(viewJs, ViewName.PLATFORM_USERS_LOWERCASE)
}
- export const queryPlatformView = async <T>(
+ export const queryPlatformView = async <T extends Document>(
viewName: ViewName,
params: DatabaseQueryOpts,
opts?: QueryViewOptions
- ): Promise<T[] | T | undefined> => {
+ ): Promise<T[] | T> => {
const CreateFuncByName: any = {
[ViewName.ACCOUNT_BY_EMAIL]: createPlatformAccountEmailView,
[ViewName.PLATFORM_USERS_LOWERCASE]: createPlatformUserView,
@@ -220,7 +219,7 @@ const CreateFuncByName: any = {
[ViewName.USER_BY_APP]: createUserAppView,
}
- export const queryGlobalView = async <T>(
+ export const queryGlobalView = async <T extends Document>(
viewName: ViewName,
params: DatabaseQueryOpts,
db?: Database,
@@ -231,10 +230,10 @@ export const queryGlobalView = async <T>(
db = getGlobalDB()
}
const createFn = CreateFuncByName[viewName]
- return queryView(viewName, params, db!, createFn, opts)
+ return queryView<T>(viewName, params, db!, createFn, opts)
}
- export async function queryGlobalViewRaw<T>(
+ export async function queryGlobalViewRaw<T extends Document>(
viewName: ViewName,
params: DatabaseQueryOpts,
opts?: QueryViewOptions
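With the T extends Document bound and the tightened return type, the view helpers can be queried with a concrete document type; the caller still has to handle the array-or-single shape unless arrayResponse is requested. A hedged sketch (the view, params and result handling are illustrative):

// Illustrative sketch, not part of this commit.
import { User } from "@budibase/types"
import { ViewName } from "../constants"
import { queryGlobalView } from "./views"

export async function findUserByEmail(email: string) {
  const result = await queryGlobalView<User>(ViewName.USER_BY_EMAIL, {
    key: email.toLowerCase(),
    include_docs: true,
  })
  // may be a single doc or an array depending on how many rows matched
  return Array.isArray(result) ? result[0] : result
}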

View File

@@ -1,5 +1,6 @@
import { APP_DEV_PREFIX, APP_PREFIX } from "../constants"
import { App } from "@budibase/types"
const NO_APP_ERROR = "No app provided"
export function isDevAppID(appId?: string) {

View File

@@ -107,6 +107,7 @@ const environment = {
ENCRYPTION_KEY: process.env.ENCRYPTION_KEY,
API_ENCRYPTION_KEY: getAPIEncryptionKey(),
COUCH_DB_URL: process.env.COUCH_DB_URL || "http://localhost:4005",
+ COUCH_DB_SQL_URL: process.env.COUCH_DB_SQL_URL || "http://localhost:4984",
COUCH_DB_USERNAME: process.env.COUCH_DB_USER,
COUCH_DB_PASSWORD: process.env.COUCH_DB_PASSWORD,
GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID,
@@ -165,6 +166,8 @@ const environment = {
DISABLE_JWT_WARNING: process.env.DISABLE_JWT_WARNING,
BLACKLIST_IPS: process.env.BLACKLIST_IPS,
SERVICE_TYPE: "unknown",
+ PASSWORD_MIN_LENGTH: process.env.PASSWORD_MIN_LENGTH,
+ PASSWORD_MAX_LENGTH: process.env.PASSWORD_MAX_LENGTH,
/**
* Enable to allow an admin user to login using a password.
* This can be useful to prevent lockout when configuring SSO.

View File

@@ -1,2 +1,3 @@
import PosthogProcessor from "./PosthogProcessor"
export default PosthogProcessor

View File

@@ -1,7 +1,9 @@
import { testEnv } from "../../../../../tests/extra"
import PosthogProcessor from "../PosthogProcessor"
import { Event, IdentityType, Hosting } from "@budibase/types"
const tk = require("timekeeper")
import * as cache from "../../../../cache/generic"
import { CacheKey } from "../../../../cache/generic"
import * as context from "../../../../context"

View File

@@ -1,5 +1,6 @@
import env from "../environment"
import * as context from "../context"
export * from "./installation"
/**

View File

@@ -32,11 +32,14 @@ export * as blacklist from "./blacklist"
export * as docUpdates from "./docUpdates"
export * from "./utils/Duration"
export { SearchParams } from "./db"
+ export * as docIds from "./docIds"
+ export * as security from "./security"
// Add context to tenancy for backwards compatibility
// only do this for external usages to prevent internal
// circular dependencies
import * as context from "./context"
import * as _tenancy from "./tenancy"
export const tenancy = {
..._tenancy,
...context,
@@ -50,6 +53,7 @@ export * from "./constants"
// expose package init function
import * as db from "./db"
export const init = (opts: any = {}) => {
db.init(opts.db)
}

View File

@@ -1,7 +1,6 @@
import { newid } from "./utils"
import * as events from "./events"
- import { StaticDatabases } from "./db"
- import { doWithDB } from "./db"
+ import { StaticDatabases, doWithDB } from "./db"
import { Installation, IdentityType, Database } from "@budibase/types"
import * as context from "./context"
import semver from "semver"

View File

@@ -1,4 +1,5 @@
import { Header } from "../../constants"
const correlator = require("correlation-id")
export const setHeader = (headers: any) => {

View File

@@ -1,5 +1,6 @@
import { Header } from "../../constants"
import { v4 as uuid } from "uuid"
const correlator = require("correlation-id")
const correlation = (ctx: any, next: any) => {

Some files were not shown because too many files have changed in this diff.