diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 7abfe537e9..adfbc29008 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -79,6 +79,8 @@ Component libraries are collections of components as well as the definition of t
 ### Getting Started For Contributors
 #### 1. Prerequisites
+NodeJS Version `14.x.x`
+
 *yarn -* `npm install -g yarn`
 *jest* - `npm install -g jest`
diff --git a/.github/README.md b/.github/README.md
new file mode 100644
index 0000000000..d2fcd16bb0
--- /dev/null
+++ b/.github/README.md
@@ -0,0 +1,93 @@
+
+# Budibase CI Pipelines
+
+Welcome to the Budibase CI pipelines directory. This document details what each of the CI pipelines is for, and some common combinations.
+
+## All CI Pipelines
+
+### Note
+- When running workflow dispatch jobs, ensure you always run them off the `master` branch. It defaults to `develop`, so double check before running any jobs.
+
+### Standard CI Build Job (budibase_ci.yml)
+Triggers:
+- PR or push to develop
+- PR or push to master
+
+The standard CI build job is what runs when you raise a PR to develop or master.
+- Installs all dependencies
+- Builds the project
+- Runs the unit tests
+- Generates test coverage metrics with Codecov
+- Runs the Cypress tests
+
+### Release Develop Job (release-develop.yml)
+Triggers:
+- Push to develop
+
+This job is responsible for building, tagging and pushing Docker images out to the test and staging environments.
+- Installs all dependencies
+- Builds the project
+- Runs the unit tests
+- Publishes the Budibase JS packages under a prerelease tag to NPM
+- Builds, tags and pushes Docker images under the `develop` tag to Docker Hub
+
+These images will then be pulled by the test and staging environments, updating them to the latest `develop` build automatically. Discord notifications are sent to the #infra channel when this occurs.
+
+### Release Job (release.yml)
+Triggers:
+- Push to master
+
+This job is responsible for building and pushing the latest code to NPM and Docker Hub, so that it can be deployed.
+- Installs all dependencies
+- Builds the project
+- Runs the unit tests
+- Publishes the Budibase JS packages under a release tag to NPM (always incremented by a patch version)
+- Builds, tags and pushes Docker images to Docker Hub under the `vx.x.x` tag (the tag of the NPM release)
+
+### Release Selfhost Job (release-selfhost.yml)
+Triggers:
+- Manual Workflow Dispatch Trigger
+
+This job is responsible for delivering the latest version of Budibase to those that are self-hosting.
+
+It relies on the release job having run first, so that the latest image has been pushed to Docker Hub. The job pulls the latest version from `lerna.json` and tries to find an image in Docker Hub corresponding to that version. For example, if the version in `lerna.json` is `1.0.0`:
+- Pull the images for all Budibase services tagged `v1.0.0` from Docker Hub
+- Tag these images as `latest`
+- Push them back to Docker Hub. This means anyone who pulls `latest` (self-hosters using docker-compose) will get the latest version.
+- Build and release the Budibase Helm chart for Kubernetes users
+- Perform a GitHub release with the latest version. You can see previous releases here: https://github.com/Budibase/budibase/releases
+
+### Cloud Deploy (deploy-cloud.yaml)
+Triggers:
+- Manual Workflow Dispatch Trigger
+
+This job is responsible for deploying to our production cloud Kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to Docker Hub.
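+
+As a rough sketch (this assumes you have the GitHub CLI installed and authenticated against this repo), the dispatch can also be kicked off from the command line; `version` is the workflow input defined in `deploy-cloud.yaml`, and `1.0.0` below is only a placeholder:
+
+```bash
+# Trigger the production cloud deploy from the master branch,
+# pinning the deployment to a specific Budibase version (placeholder value shown).
+gh workflow run deploy-cloud.yaml --ref master -f version=1.0.0
+
+# Omit the version input to deploy whatever version is currently in lerna.json.
+gh workflow run deploy-cloud.yaml --ref master
+```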
+You can also manually enter a version number for this job, allowing you to perform rollbacks or upgrade to a specific version. After kicking off this job, the following will occur:
+
+- Checks out the master branch
+- Pulls the latest `values.yaml` from budibase-infra, a private repo containing Budibase's infrastructure configuration
+- Gets the latest Budibase version from `lerna.json`, if it wasn't specified when you kicked off the workflow
+- Configures AWS credentials
+- Deploys the Helm chart in the budibase repo to our production EKS cluster, injecting the `values.yaml` we pulled from budibase-infra
+- Fires off a Discord webhook in the #infra channel to show that the deployment completed successfully.
+
+## Common Workflows
+
+### Deploy Changes to Production (Release)
+- Merge `develop` into `master`
+- Wait for the Budibase CI job and release job to run
+- Run the cloud deploy job
+- Run the release selfhost job
+
+### Deploy Changes to Production (Hotfix)
+- Branch off `master`
+- Perform your hotfix
+- Merge back into `master`
+- Wait for the Budibase CI job and release job to run
+- Run the cloud deploy job
+- Run the release selfhost job
+
+### Rollback A Bad Cloud Deployment
+- Kick off the cloud deploy job
+- Ensure you are running it off `master`
+- Enter the version number of the last known good version of Budibase, for example `1.0.0`
\ No newline at end of file
diff --git a/.github/workflows/budibase_ci.yml b/.github/workflows/budibase_ci.yml
index 5c4a111e23..7e95115415 100644
--- a/.github/workflows/budibase_ci.yml
+++ b/.github/workflows/budibase_ci.yml
@@ -41,4 +41,6 @@ jobs:
          files: ./packages/server/coverage/clover.xml
          name: codecov-umbrella
          verbose: true
+
+      # TODO: parallelise this
      - run: yarn test:e2e:ci
diff --git a/.github/workflows/deploy-cloud.yaml b/.github/workflows/deploy-cloud.yaml
new file mode 100644
index 0000000000..26422a2e7b
--- /dev/null
+++ b/.github/workflows/deploy-cloud.yaml
@@ -0,0 +1,61 @@
+name: Budibase Cloud Deploy
+
+on:
+  workflow_dispatch:
+    inputs:
+      version:
+        description: Budibase release version.
For example - 1.0.0 + required: false + +jobs: + release: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Pull values.yaml from budibase-infra + run: | + curl -H "Authorization: token ${{ secrets.GH_PERSONAL_TOKEN }}" \ + -H 'Accept: application/vnd.github.v3.raw' \ + -o values.production.yaml \ + -L https://api.github.com/repos/budibase/budibase-infra/contents/kubernetes/values.yaml + wc -l values.production.yaml + + - name: Get the latest budibase release version + id: version + run: | + if [ -z "${{ github.event.inputs.version }}" ]; then + release_version=$(cat lerna.json | jq -r '.version') + else + release_version=${{ github.event.inputs.version }} + fi + echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: eu-west-1 + + - name: Deploy to EKS + uses: craftech-io/eks-helm-deploy-action@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS__KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: eu-west-1 + cluster-name: budibase-eks-production + config-files: values.production.yaml + chart-path: charts/budibase + namespace: budibase + values: globals.appVersion=v${{ env.RELEASE_VERSION }} + name: budibase-prod + + - name: Discord Webhook Action + uses: tsickert/discord-webhook@v4.0.0 + with: + webhook-url: ${{ secrets.PROD_DEPLOY_WEBHOOK_URL }} + content: "Production Deployment Complete: ${{ env.RELEASE_VERSION }} deployed to Budibase Cloud." + embed-title: ${{ env.RELEASE_VERSION }} + diff --git a/.github/workflows/deploy-preprod.yml b/.github/workflows/deploy-preprod.yml new file mode 100644 index 0000000000..5b3282313c --- /dev/null +++ b/.github/workflows/deploy-preprod.yml @@ -0,0 +1,66 @@ +name: Budibase Release Preprod + +on: + workflow_dispatch: + +env: + POSTHOG_TOKEN: ${{ secrets.POSTHOG_TOKEN }} + INTERCOM_TOKEN: ${{ secrets.INTERCOM_TOKEN }} + POSTHOG_URL: ${{ secrets.POSTHOG_URL }} + SENTRY_DSN: ${{ secrets.SENTRY_DSN }} + +jobs: + release: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: eu-west-1 + + - name: Get the latest budibase release version + id: version + run: | + release_version=$(cat lerna.json | jq -r '.version') + echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV + + - name: Pull values.yaml from budibase-infra + run: | + curl -H "Authorization: token ${{ secrets.GH_PERSONAL_TOKEN }}" \ + -H 'Accept: application/vnd.github.v3.raw' \ + -o values.preprod.yaml \ + -L https://api.github.com/repos/budibase/budibase-infra/contents/kubernetes/budibase-preprod/values.yaml + wc -l values.preprod.yaml + + - name: Deploy to Preprod Environment + uses: deliverybot/helm@v1 + with: + release: budibase-preprod + namespace: budibase + chart: charts/budibase + token: ${{ github.token }} + helm: helm3 + values: | + globals: + appVersion: v${{ env.RELEASE_VERSION }} + ingress: + enabled: true + nginx: true + value-files: >- + [ + "values.preprod.yaml" + ] + env: + KUBECONFIG_FILE: '${{ secrets.PREPROD_KUBECONFIG }}' + + - name: Discord Webhook Action + uses: tsickert/discord-webhook@v4.0.0 + with: + webhook-url: ${{ secrets.PROD_DEPLOY_WEBHOOK_URL }} + 
content: "Preprod Deployment Complete: ${{ env.RELEASE_VERSION }} deployed to Budibase Pre-prod." + embed-title: ${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release-selfhost.yml b/.github/workflows/release-selfhost.yml index 42c7027b24..5223fc6864 100644 --- a/.github/workflows/release-selfhost.yml +++ b/.github/workflows/release-selfhost.yml @@ -3,53 +3,62 @@ name: Budibase Release Selfhost on: workflow_dispatch: -env: - POSTHOG_TOKEN: ${{ secrets.POSTHOG_TOKEN }} - INTERCOM_TOKEN: ${{ secrets.INTERCOM_TOKEN }} - POSTHOG_URL: ${{ secrets.POSTHOG_URL }} - jobs: release: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - uses: actions/setup-node@v1 - with: - node-version: 14.x - - run: yarn - - run: yarn bootstrap + with: + fetch_depth: 0 - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-1 - - - name: 'Get Previous tag' - id: previoustag - uses: "WyriHaximus/github-action-get-previous-tag@v1" - - - name: Build/release Docker images (Self Host) + - name: Tag and release Docker images (Self Host) run: | docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - yarn build - yarn build:docker:selfhost + + # Get latest release version + release_version=$(cat lerna.json | jq -r '.version') + echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV + release_tag=v$release_version + + # Pull apps and worker images + docker pull budibase/apps:$release_tag + docker pull budibase/worker:$release_tag + + # Tag apps and worker images + docker tag budibase/apps:$release_tag budibase/apps:$SELFHOST_TAG + docker tag budibase/worker:$release_tag budibase/worker:$SELFHOST_TAG + + # Push images + docker push budibase/apps:$SELFHOST_TAG + docker push budibase/worker:$SELFHOST_TAG env: DOCKER_USER: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }} - BUDIBASE_RELEASE_VERSION: ${{ steps.previoustag.outputs.tag }} + SELFHOST_TAG: latest - - uses: azure/setup-helm@v1 - id: install + - name: Setup Helm + uses: azure/setup-helm@v1 + id: helm-install - # So, we need to inject the values into this - - run: yarn release:helm - - - name: Run chart-releaser - uses: helm/chart-releaser-action@v1.1.0 - with: - charts_dir: docs + - name: Build and release helm chart + run: | + git config user.name "Budibase Helm Bot" + git config user.email "<>" + git pull + helm package charts/budibase + git checkout gh-pages + mv *.tgz docs + helm repo index docs + git add -A + git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}" + git push env: - CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Perform Github Release + uses: softprops/action-gh-release@v1 + with: + name: v${{ env.RELEASE_VERSION }} + tag_name: v${{ env.RELEASE_VERSION }} + generate_release_notes: true \ No newline at end of file diff --git a/hosting/kubernetes/budibase/.helmignore b/charts/budibase/.helmignore similarity index 100% rename from hosting/kubernetes/budibase/.helmignore rename to charts/budibase/.helmignore diff --git a/charts/budibase/Chart.lock b/charts/budibase/Chart.lock new file mode 100644 index 0000000000..75b9de07b5 --- /dev/null +++ b/charts/budibase/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: couchdb + repository: https://apache.github.io/couchdb-helm + version: 3.3.4 +- name: ingress-nginx + repository: https://kubernetes.github.io/ingress-nginx + version: 4.0.13 
+digest: sha256:20892705c2d8e64c98257d181063a514ac55013e2b43399a6e54868a97f97845 +generated: "2021-12-30T18:55:30.878411Z" diff --git a/charts/budibase/Chart.yaml b/charts/budibase/Chart.yaml new file mode 100644 index 0000000000..8c9d44f201 --- /dev/null +++ b/charts/budibase/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: budibase +description: >- + Budibase is an open source low-code platform, helping thousands of teams build + apps for their workplace in minutes. +keywords: + - low-code + - database + - cluster +sources: + - https://github.com/Budibase/budibase + - https://budibase.com +type: application +version: 0.2.5 +appVersion: 1.0.25 +dependencies: + - name: couchdb + version: 3.3.4 + repository: https://apache.github.io/couchdb-helm + condition: services.couchdb.enabled + - name: ingress-nginx + version: 4.0.13 + repository: https://kubernetes.github.io/ingress-nginx + condition: ingress.nginx diff --git a/hosting/kubernetes/budibase/README.md b/charts/budibase/README.md similarity index 100% rename from hosting/kubernetes/budibase/README.md rename to charts/budibase/README.md diff --git a/charts/budibase/charts/couchdb-3.3.4.tgz b/charts/budibase/charts/couchdb-3.3.4.tgz new file mode 100644 index 0000000000..f7ebfd3e96 Binary files /dev/null and b/charts/budibase/charts/couchdb-3.3.4.tgz differ diff --git a/charts/budibase/charts/ingress-nginx-4.0.13.tgz b/charts/budibase/charts/ingress-nginx-4.0.13.tgz new file mode 100644 index 0000000000..1e34215c5f Binary files /dev/null and b/charts/budibase/charts/ingress-nginx-4.0.13.tgz differ diff --git a/hosting/kubernetes/budibase/templates/NOTES.txt b/charts/budibase/templates/NOTES.txt similarity index 100% rename from hosting/kubernetes/budibase/templates/NOTES.txt rename to charts/budibase/templates/NOTES.txt diff --git a/hosting/kubernetes/budibase/templates/_helpers.tpl b/charts/budibase/templates/_helpers.tpl similarity index 100% rename from hosting/kubernetes/budibase/templates/_helpers.tpl rename to charts/budibase/templates/_helpers.tpl diff --git a/hosting/kubernetes/budibase/templates/alb-ingress.yaml b/charts/budibase/templates/alb-ingress.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/alb-ingress.yaml rename to charts/budibase/templates/alb-ingress.yaml diff --git a/hosting/kubernetes/budibase/templates/app-service-deployment.yaml b/charts/budibase/templates/app-service-deployment.yaml similarity index 93% rename from hosting/kubernetes/budibase/templates/app-service-deployment.yaml rename to charts/budibase/templates/app-service-deployment.yaml index 7c62ada63f..8086c0ab20 100644 --- a/hosting/kubernetes/budibase/templates/app-service-deployment.yaml +++ b/charts/budibase/templates/app-service-deployment.yaml @@ -73,17 +73,13 @@ spec: name: {{ template "budibase.fullname" . 
}} key: objectStoreSecret - name: MINIO_URL - {{ if .Values.services.objectStore.url }} value: {{ .Values.services.objectStore.url }} - {{ else }} - value: http://minio-service:{{ .Values.services.objectStore.port }} - {{ end }} - name: PORT value: {{ .Values.services.apps.port | quote }} - name: MULTI_TENANCY value: {{ .Values.globals.multiTenancy | quote }} - name: LOG_LEVEL - value: {{ .Values.services.apps.logLevel | quote }} + value: {{ default "info" .Values.services.apps.logLevel | quote }} - name: REDIS_PASSWORD value: {{ .Values.services.redis.password }} - name: REDIS_URL @@ -110,7 +106,7 @@ spec: value: {{ .Values.globals.accountPortalApiKey | quote }} - name: COOKIE_DOMAIN value: {{ .Values.globals.cookieDomain | quote }} - image: budibase/apps + image: budibase/apps:{{ .Values.globals.appVersion }} imagePullPolicy: Always name: bbapps ports: diff --git a/hosting/kubernetes/budibase/templates/app-service-service.yaml b/charts/budibase/templates/app-service-service.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/app-service-service.yaml rename to charts/budibase/templates/app-service-service.yaml diff --git a/charts/budibase/templates/couchdb-backup.yaml b/charts/budibase/templates/couchdb-backup.yaml new file mode 100644 index 0000000000..1072046c8c --- /dev/null +++ b/charts/budibase/templates/couchdb-backup.yaml @@ -0,0 +1,43 @@ +{{- if .Values.services.couchdb.backup.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + app.kubernetes.io/name: couchdb-backup + name: couchdb-backup +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: couchdb-backup + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.21.0 (992df58d8) + creationTimestamp: null + labels: + app.kubernetes.io/name: couchdb-backup + spec: + containers: + - env: + - name: SOURCE + value: {{ .Values.services.couchdb.url }} + - name: TARGET + value: {{ .Values.services.couchdb.backup.target | quote }} + - name: RUN_EVERY_SECS + value: {{ .Values.services.couchdb.backup.interval | quote }} + - name: VERBOSE + value: "true" + image: redgeoff/replicate-couchdb-cluster + imagePullPolicy: Always + name: couchdb-backup + resources: {} +status: {} +{{- end }} diff --git a/hosting/kubernetes/budibase/templates/hpa.yaml b/charts/budibase/templates/hpa.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/hpa.yaml rename to charts/budibase/templates/hpa.yaml diff --git a/hosting/kubernetes/budibase/templates/ingress.yaml b/charts/budibase/templates/ingress.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/ingress.yaml rename to charts/budibase/templates/ingress.yaml diff --git a/hosting/kubernetes/budibase/templates/minio-data-persistentvolumeclaim.yaml b/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/minio-data-persistentvolumeclaim.yaml rename to charts/budibase/templates/minio-data-persistentvolumeclaim.yaml diff --git a/hosting/kubernetes/budibase/templates/minio-service-deployment.yaml b/charts/budibase/templates/minio-service-deployment.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/minio-service-deployment.yaml rename to charts/budibase/templates/minio-service-deployment.yaml diff --git 
a/hosting/kubernetes/budibase/templates/minio-service-service.yaml b/charts/budibase/templates/minio-service-service.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/minio-service-service.yaml rename to charts/budibase/templates/minio-service-service.yaml diff --git a/hosting/kubernetes/budibase/templates/proxy-service-deployment.yaml b/charts/budibase/templates/proxy-service-deployment.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/proxy-service-deployment.yaml rename to charts/budibase/templates/proxy-service-deployment.yaml diff --git a/hosting/kubernetes/budibase/templates/proxy-service-service.yaml b/charts/budibase/templates/proxy-service-service.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/proxy-service-service.yaml rename to charts/budibase/templates/proxy-service-service.yaml diff --git a/hosting/kubernetes/budibase/templates/redis-data-persistentvolumeclaim.yaml b/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/redis-data-persistentvolumeclaim.yaml rename to charts/budibase/templates/redis-data-persistentvolumeclaim.yaml diff --git a/hosting/kubernetes/budibase/templates/redis-service-deployment.yaml b/charts/budibase/templates/redis-service-deployment.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/redis-service-deployment.yaml rename to charts/budibase/templates/redis-service-deployment.yaml diff --git a/hosting/kubernetes/budibase/templates/redis-service-service.yaml b/charts/budibase/templates/redis-service-service.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/redis-service-service.yaml rename to charts/budibase/templates/redis-service-service.yaml diff --git a/hosting/kubernetes/budibase/templates/secrets.yaml b/charts/budibase/templates/secrets.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/secrets.yaml rename to charts/budibase/templates/secrets.yaml diff --git a/hosting/kubernetes/budibase/templates/service.yaml b/charts/budibase/templates/service.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/service.yaml rename to charts/budibase/templates/service.yaml diff --git a/hosting/kubernetes/budibase/templates/serviceaccount.yaml b/charts/budibase/templates/serviceaccount.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/serviceaccount.yaml rename to charts/budibase/templates/serviceaccount.yaml diff --git a/hosting/kubernetes/budibase/templates/tests/test-connection.yaml b/charts/budibase/templates/tests/test-connection.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/tests/test-connection.yaml rename to charts/budibase/templates/tests/test-connection.yaml diff --git a/hosting/kubernetes/budibase/templates/worker-service-deployment.yaml b/charts/budibase/templates/worker-service-deployment.yaml similarity index 93% rename from hosting/kubernetes/budibase/templates/worker-service-deployment.yaml rename to charts/budibase/templates/worker-service-deployment.yaml index 6cded8545f..8b6f5564ad 100644 --- a/hosting/kubernetes/budibase/templates/worker-service-deployment.yaml +++ b/charts/budibase/templates/worker-service-deployment.yaml @@ -70,17 +70,13 @@ spec: name: {{ template "budibase.fullname" . 
}} key: objectStoreSecret - name: MINIO_URL - {{ if .Values.services.objectStore.url }} value: {{ .Values.services.objectStore.url }} - {{ else }} - value: http://minio-service:{{ .Values.services.objectStore.port }} - {{ end }} - name: PORT value: {{ .Values.services.worker.port | quote }} - name: MULTI_TENANCY value: {{ .Values.globals.multiTenancy | quote }} - name: LOG_LEVEL - value: {{ .Values.services.worker.logLevel | quote }} + value: {{ default "info" .Values.services.worker.logLevel | quote }} - name: REDIS_PASSWORD value: {{ .Values.services.redis.password | quote }} - name: REDIS_URL @@ -115,7 +111,7 @@ spec: value: {{ .Values.globals.smtp.from | quote }} - name: APPS_URL value: http://app-service:{{ .Values.services.apps.port }} - image: budibase/worker + image: budibase/worker:{{ .Values.globals.appVersion }} imagePullPolicy: Always name: bbworker ports: diff --git a/hosting/kubernetes/budibase/templates/worker-service-service.yaml b/charts/budibase/templates/worker-service-service.yaml similarity index 100% rename from hosting/kubernetes/budibase/templates/worker-service-service.yaml rename to charts/budibase/templates/worker-service-service.yaml diff --git a/charts/budibase/values.yaml b/charts/budibase/values.yaml new file mode 100644 index 0000000000..4666d01c70 --- /dev/null +++ b/charts/budibase/values.yaml @@ -0,0 +1,305 @@ +# Default values for budibase. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +# fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: + {} + # fsGroup: 2000 + +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 10000 + +ingress: + enabled: true + aws: false + nginx: true + certificateArn: "" + className: "" + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: # change if using custom domain + paths: + - path: / + pathType: Prefix + backend: + service: + name: proxy-service + port: + number: 10000 + +resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +globals: + appVersion: "latest" + budibaseEnv: PRODUCTION + enableAnalytics: true + sentryDSN: "" + posthogToken: "" + logLevel: info + selfHosted: "1" # set to 0 for budibase cloud environment, set to 1 for self-hosted setup + multiTenancy: "0" # set to 0 to disable multiple orgs, set to 1 to enable multiple orgs + accountPortalUrl: "" + accountPortalApiKey: "" + cookieDomain: "" + platformUrl: "" + + createSecrets: true # creates an internal API key, JWT secrets and redis password for you + + # if createSecrets is set to false, you can hard-code your secrets here + internalApiKey: "" + jwtSecret: "" + + smtp: + enabled: false + +services: + budibaseVersion: latest + dns: cluster.local + + proxy: + port: 10000 + replicaCount: 1 + + apps: + port: 4002 + replicaCount: 1 + logLevel: info + + worker: + port: 4001 + replicaCount: 1 + + couchdb: + enabled: true + # url: "" # only change if pointing to existing couch server + # user: "" # only change if pointing to existing couch server + # password: "" # only change if pointing to existing couch server + port: 5984 + backup: + enabled: false + # target couchDB instance to back up to + target: "" + # backup interval in seconds + interval: "" + + redis: + enabled: true # disable if using external redis + port: 6379 + replicaCount: 1 + url: "" # only change if pointing to existing redis cluster and enabled: false + password: "budibase" # recommended to override if using built-in redis + storage: 100Mi + + objectStore: + minio: true + browser: true + port: 9000 + replicaCount: 1 + accessKey: "" # AWS_ACCESS_KEY if using S3 or existing minio access key + secretKey: "" # AWS_SECRET_ACCESS_KEY if using S3 or existing minio secret + region: "" # AWS_REGION if using S3 or existing minio secret + url: "http://minio-service:9000" # only change if pointing to existing minio cluster or S3 and minio: false + storage: 100Mi + +# Override values in couchDB subchart +couchdb: + ## clusterSize is the initial size of the CouchDB cluster. + clusterSize: 3 + allowAdminParty: false + + # Secret Management + createAdminSecret: true + + # adminUsername: budibase + # adminPassword: budibase + # adminHash: -pbkdf2-this_is_not_necessarily_secure_either + # cookieAuthSecret: admin + + ## When enabled, will deploy a networkpolicy that allows CouchDB pods to + ## communicate with each other for clustering and ingress on port 5984 + networkPolicy: + enabled: true + + # Use a service account + serviceAccount: + enabled: true + create: true + # name: + # imagePullSecrets: + # - name: myimagepullsecret + + ## The storage volume used by each Pod in the StatefulSet. If a + ## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral + ## local storage. Setting the storageClass attribute to "-" disables dynamic + ## provisioning of Persistent Volumes; leaving it unset will invoke the default + ## provisioner. 
+ persistentVolume: + enabled: false + accessModes: + - ReadWriteOnce + size: 10Gi + storageClass: "" + + ## The CouchDB image + image: + repository: couchdb + tag: 3.1.0 + pullPolicy: IfNotPresent + + ## Experimental integration with Lucene-powered fulltext search + enableSearch: true + searchImage: + repository: kocolosk/couchdb-search + tag: 0.2.0 + pullPolicy: IfNotPresent + + initImage: + repository: busybox + tag: latest + pullPolicy: Always + + ## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter + ## problems you can try setting podManagementPolicy to the StatefulSet default + ## `OrderedReady` + podManagementPolicy: Parallel + + ## Optional pod annotations + annotations: {} + + ## Optional tolerations + tolerations: [] + + service: + # annotations: + enabled: true + type: ClusterIP + externalPort: 5984 + + ## An Ingress resource can provide name-based virtual hosting and TLS + ## termination among other things for CouchDB deployments which are accessed + ## from outside the Kubernetes cluster. + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ + ingress: + enabled: false + hosts: + - chart-example.local + path: / + annotations: [] + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + ## Optional resource requests and limits for the CouchDB container + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + resources: + {} + # requests: + # cpu: 100m + # memory: 128Mi + # limits: + # cpu: 56 + # memory: 256Gi + + ## erlangFlags is a map that is passed to the Erlang VM as flags using the + ## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to + ## establish connectivity between cluster nodes. + ## ref: http://erlang.org/doc/man/erl.html#init_flags + erlangFlags: + name: couchdb + setcookie: monster + + ## couchdbConfig will override default CouchDB configuration settings. + ## The contents of this map are reformatted into a .ini file laid down + ## by a ConfigMap object. + ## ref: http://docs.couchdb.org/en/latest/config/index.html + couchdbConfig: + couchdb: + uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance + # cluster: + # q: 8 # Create 8 shards for each database + chttpd: + bind_address: any + # chttpd.require_valid_user disables all the anonymous requests to the port + # 5984 when is set to true. + require_valid_user: false + + # Kubernetes local cluster domain. + # This is used to generate FQDNs for peers when joining the CouchDB cluster. 
+ dns: + clusterDomainSuffix: cluster.local + + ## Configure liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 diff --git a/docs/budibase-0.1.0.tgz b/docs/budibase-0.1.0.tgz deleted file mode 100644 index 7873874ab0..0000000000 Binary files a/docs/budibase-0.1.0.tgz and /dev/null differ diff --git a/docs/budibase-0.1.1.tgz b/docs/budibase-0.1.1.tgz deleted file mode 100644 index b38527c4a4..0000000000 Binary files a/docs/budibase-0.1.1.tgz and /dev/null differ diff --git a/docs/budibase-0.2.0.tgz b/docs/budibase-0.2.0.tgz deleted file mode 100644 index 379b92cbb7..0000000000 Binary files a/docs/budibase-0.2.0.tgz and /dev/null differ diff --git a/docs/budibase-0.2.1.tgz b/docs/budibase-0.2.1.tgz deleted file mode 100644 index f3423763a5..0000000000 Binary files a/docs/budibase-0.2.1.tgz and /dev/null differ diff --git a/docs/budibase-0.2.2.tgz b/docs/budibase-0.2.2.tgz deleted file mode 100644 index c70754eb99..0000000000 Binary files a/docs/budibase-0.2.2.tgz and /dev/null differ diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 0fa6060f8f..0000000000 --- a/docs/index.html +++ /dev/null @@ -1,9 +0,0 @@ - -
-Point Helm at this repo to see charts.
- - \ No newline at end of file diff --git a/docs/index.yaml b/docs/index.yaml deleted file mode 100644 index 04543c147e..0000000000 --- a/docs/index.yaml +++ /dev/null @@ -1,132 +0,0 @@ -apiVersion: v1 -entries: - budibase: - - apiVersion: v2 - appVersion: 0.9.169 - created: "2021-10-20T14:27:23.521358+01:00" - dependencies: - - condition: services.couchdb.enabled - name: couchdb - repository: https://apache.github.io/couchdb-helm - version: 3.3.4 - - condition: ingress.nginx - name: ingress-nginx - repository: https://github.com/kubernetes/ingress-nginx - version: 3.35.0 - description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. - digest: 57f365d799fcaace4658883cb8ec961a7905383a68acf065af4f6e57f9878ff8 - keywords: - - low-code - - database - - cluster - name: budibase - sources: - - https://github.com/Budibase/budibase - - https://budibase.com - type: application - urls: - - https://budibase.github.io/budibase/budibase-0.2.2.tgz - version: 0.2.2 - - apiVersion: v2 - appVersion: 0.9.163 - created: "2021-10-20T14:27:23.5153+01:00" - dependencies: - - condition: services.couchdb.enabled - name: couchdb - repository: https://apache.github.io/couchdb-helm - version: 3.3.4 - - condition: ingress.nginx - name: ingress-nginx - repository: https://github.com/kubernetes/ingress-nginx - version: 3.35.0 - description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. - digest: ebac6d8631cc38b266c3689508b5123f5afc395f23bdb02738be26c7cae0b0b5 - keywords: - - low-code - - database - - cluster - name: budibase - sources: - - https://github.com/Budibase/budibase - - https://budibase.com - type: application - urls: - - https://budibase.github.io/budibase/budibase-0.2.1.tgz - version: 0.2.1 - - apiVersion: v2 - appVersion: 0.9.163 - created: "2021-10-20T14:27:23.510041+01:00" - dependencies: - - condition: services.couchdb.enabled - name: couchdb - repository: https://apache.github.io/couchdb-helm - version: 3.3.4 - - condition: ingress.nginx - name: ingress-nginx - repository: https://github.com/kubernetes/ingress-nginx - version: 3.35.0 - description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. - digest: f369536c0eac1f6959d51e8ce6d74a87a7a9df29ae84fb9cbed0a273ab77429b - keywords: - - low-code - - database - - cluster - name: budibase - sources: - - https://github.com/Budibase/budibase - - https://budibase.com - type: application - urls: - - https://budibase.github.io/budibase/budibase-0.2.0.tgz - version: 0.2.0 - - apiVersion: v2 - appVersion: 0.9.56 - created: "2021-10-20T14:27:23.504543+01:00" - dependencies: - - condition: services.couchdb.enabled - name: couchdb - repository: https://apache.github.io/couchdb-helm - version: 3.3.4 - - name: ingress-nginx - repository: https://github.com/kubernetes/ingress-nginx - version: 3.35.0 - description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. 
- digest: 8dc4f2ed4d98cad5adf25936aefea680042d3e4e17832f846b961fd8708ad192 - keywords: - - low-code - - database - - cluster - name: budibase - sources: - - https://github.com/Budibase/budibase - - https://budibase.com - type: application - urls: - - https://budibase.github.io/budibase/budibase-0.1.1.tgz - version: 0.1.1 - - apiVersion: v2 - appVersion: 0.9.56 - created: "2021-10-20T14:27:23.496847+01:00" - dependencies: - - condition: services.couchdb.enabled - name: couchdb - repository: https://apache.github.io/couchdb-helm - version: 3.3.4 - - name: ingress-nginx - repository: https://github.com/kubernetes/ingress-nginx - version: 3.35.0 - description: Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. - digest: 08031b0803cce0eff64472e569d454d9176119c8207aa9873a9c95ee66cc7d3f - keywords: - - low-code - - database - - cluster - name: budibase - sources: - - https://github.com/Budibase/budibase - - https://budibase.com - type: application - urls: - - https://budibase.github.io/budibase/budibase-0.1.0.tgz - version: 0.1.0 -generated: "2021-10-20T14:27:23.491132+01:00" diff --git a/hosting/digitalocean/README.md b/hosting/digitalocean/README.md new file mode 100644 index 0000000000..72c1950d17 --- /dev/null +++ b/hosting/digitalocean/README.md @@ -0,0 +1,19 @@ +# Budibase DigitalOcean One Click +You will find in this directory configuration for packaging and creating a snapshot for the Budibase 1 click Digitalocean build. We use this configuration to have an immutable and reproducible build package for Digitalocean, that rarely needs updated. + +## Prerequisites +You must install Hashicorps `packer` to build the snapshot for digitalocean. Follow the instructions to install packer [here](https://learn.hashicorp.com/tutorials/packer/get-started-install-cli) + +You must have the `DIGITALOCEAN_TOKEN` environment variable set, so that packer can reach out to the digitalocean API for build information. + +## Building +Just run the following command: +``` +yarn build:digitalocean +``` + +## Uploading to Marketplace +You can upload the snapshot to the Digitalocean vendor portal at the following link (Requires vendor account): + +https://marketplace.digitalocean.com/vendorportal + diff --git a/hosting/digitalocean/build.sh b/hosting/digitalocean/build.sh new file mode 100755 index 0000000000..743629ca12 --- /dev/null +++ b/hosting/digitalocean/build.sh @@ -0,0 +1,2 @@ +#!/bin/bash +packer build template.json diff --git a/hosting/digitalocean/files/etc/update-motd.d/99-one-click b/hosting/digitalocean/files/etc/update-motd.d/99-one-click new file mode 100644 index 0000000000..0f087a26ee --- /dev/null +++ b/hosting/digitalocean/files/etc/update-motd.d/99-one-click @@ -0,0 +1,19 @@ +#!/bin/sh +# +# Configured as part of the DigitalOcean 1-Click Image build process + +myip=$(hostname -I | awk '{print$1}') +cat <Creating your Budibase app from your selected template...
+tags from text +function removeTags(text) { + return text ? text.replace(tagsRegex, "") : text +} + +function addInfo(projectJson) { + let info = {} + info["title"] = projectJson.title || projectJson.name + info["version"] = projectJson.version + info["description"] = projectJson.description + return info +} + +/** + * Extracts paths provided in json format + * post, patch, put request parameters are extracted in body + * get and delete are extracted to path parameters + * @param apidocJson + * @returns {{}} + */ +function extractPaths(apidocJson) { + let apiPaths = groupByUrl(apidocJson) + let paths = {} + for (let i = 0; i < apiPaths.length; i++) { + let verbs = apiPaths[i].verbs + let url = verbs[0].url + let pattern = pathToRegexp(url, null) + let matches = pattern.exec(url) + + // Surrounds URL parameters with curly brackets -> :email with {email} + let pathKeys = [] + for (let j = 1; j < matches.length; j++) { + let key = matches[j].substr(1) + url = url.replace(matches[j], "{" + key + "}") + pathKeys.push(key) + } + + for (let j = 0; j < verbs.length; j++) { + let verb = verbs[j] + let type = verb.type + + let obj = (paths[url] = paths[url] || {}) + + if (type === "post" || type === "patch" || type === "put") { + _.extend( + obj, + createPostPushPutOutput(verb, swagger.definitions, pathKeys) + ) + } else { + _.extend(obj, createGetDeleteOutput(verb, swagger.definitions)) + } + } + } + return paths +} + +function createPostPushPutOutput(verbs, definitions, pathKeys) { + let pathItemObject = {} + let verbDefinitionResult = createVerbDefinitions(verbs, definitions) + + let params = [] + let pathParams = createPathParameters(verbs, pathKeys) + pathParams = _.filter(pathParams, function (param) { + let hasKey = pathKeys.indexOf(param.name) !== -1 + return !(param.in === "path" && !hasKey) + }) + + params = params.concat(pathParams) + let required = + verbs.parameter && + verbs.parameter.fields && + verbs.parameter.fields.Parameter && + verbs.parameter.fields.Parameter.length > 0 + + params.push({ + in: "body", + name: "body", + description: removeTags(verbs.description), + required: required, + schema: { + $ref: "#/definitions/" + verbDefinitionResult.topLevelParametersRef, + }, + }) + + pathItemObject[verbs.type] = { + tags: [verbs.group], + summary: removeTags(verbs.description), + consumes: ["application/json"], + produces: ["application/json"], + parameters: params, + } + + if (verbDefinitionResult.topLevelSuccessRef) { + pathItemObject[verbs.type].responses = { + 200: { + description: "successful operation", + schema: { + type: verbDefinitionResult.topLevelSuccessRefType, + items: { + $ref: "#/definitions/" + verbDefinitionResult.topLevelSuccessRef, + }, + }, + }, + } + } + + return pathItemObject +} + +function createVerbDefinitions(verbs, definitions) { + let result = { + topLevelParametersRef: null, + topLevelSuccessRef: null, + topLevelSuccessRefType: null, + } + let defaultObjectName = verbs.name + + let fieldArrayResult = {} + if (verbs && verbs.parameter && verbs.parameter.fields) { + fieldArrayResult = createFieldArrayDefinitions( + verbs.parameter.fields.Parameter, + definitions, + verbs.name, + defaultObjectName + ) + result.topLevelParametersRef = fieldArrayResult.topLevelRef + } + + if (verbs && verbs.success && verbs.success.fields) { + fieldArrayResult = createFieldArrayDefinitions( + verbs.success.fields["Success 200"], + definitions, + verbs.name, + defaultObjectName + ) + result.topLevelSuccessRef = fieldArrayResult.topLevelRef + result.topLevelSuccessRefType = 
fieldArrayResult.topLevelRefType + } + + return result +} + +function createFieldArrayDefinitions( + fieldArray, + definitions, + topLevelRef, + defaultObjectName +) { + let result = { + topLevelRef: topLevelRef, + topLevelRefType: null, + } + + if (!fieldArray) { + return result + } + + for (let i = 0; i < fieldArray.length; i++) { + let parameter = fieldArray[i] + + let nestedName = createNestedName(parameter.field) + let objectName = nestedName.objectName + if (!objectName) { + objectName = defaultObjectName + } + let type = parameter.type + if (i === 0) { + result.topLevelRefType = type + if (parameter.type === "Object") { + objectName = nestedName.propertyName + nestedName.propertyName = null + } else if (parameter.type === "Array") { + objectName = nestedName.propertyName + nestedName.propertyName = null + result.topLevelRefType = "array" + } + result.topLevelRef = objectName + } + + definitions[objectName] = definitions[objectName] || { + properties: {}, + required: [], + } + + if (nestedName.propertyName) { + let prop = { + type: (parameter.type || "").toLowerCase(), + description: removeTags(parameter.description), + } + if (parameter.type === "Object") { + prop.$ref = "#/definitions/" + parameter.field + } + + let typeIndex = type.indexOf("[]") + if (typeIndex !== -1 && typeIndex === type.length - 2) { + prop.type = "array" + prop.items = { + type: type.slice(0, type.length - 2), + } + } + + definitions[objectName]["properties"][nestedName.propertyName] = prop + if (!parameter.optional) { + let arr = definitions[objectName]["required"] + if (arr.indexOf(nestedName.propertyName) === -1) { + arr.push(nestedName.propertyName) + } + } + } + } + + return result +} + +function createNestedName(field) { + let propertyName = field + let objectName + let propertyNames = field.split(".") + if (propertyNames && propertyNames.length > 1) { + propertyName = propertyNames[propertyNames.length - 1] + propertyNames.pop() + objectName = propertyNames.join(".") + } + + return { + propertyName: propertyName, + objectName: objectName, + } +} + +/** + * Generate get, delete method output + * @param verbs + * @param definitions + * @returns {{}} + */ +function createGetDeleteOutput(verbs, definitions) { + let pathItemObject = {} + verbs.type = verbs.type === "del" ? "delete" : verbs.type + + let verbDefinitionResult = createVerbDefinitions(verbs, definitions) + pathItemObject[verbs.type] = { + tags: [verbs.group], + summary: removeTags(verbs.description), + consumes: ["application/json"], + produces: ["application/json"], + parameters: createPathParameters(verbs), + } + if (verbDefinitionResult.topLevelSuccessRef) { + pathItemObject[verbs.type].responses = { + 200: { + description: "successful operation", + schema: { + type: verbDefinitionResult.topLevelSuccessRefType, + items: { + $ref: "#/definitions/" + verbDefinitionResult.topLevelSuccessRef, + }, + }, + }, + } + } + return pathItemObject +} + +/** + * Iterate through all method parameters and create array of parameter objects which are stored as path parameters + * @param verbs + * @returns {Array} + */ +function createPathParameters(verbs) { + let pathItemObject = [] + if (verbs.parameter && verbs.parameter.fields.Parameter) { + for (let i = 0; i < verbs.parameter.fields.Parameter.length; i++) { + let param = verbs.parameter.fields.Parameter[i] + let field = param.field + let type = param.type + pathItemObject.push({ + name: field, + in: type === "file" ? 
"formData" : "path", + required: !param.optional, + type: param.type.toLowerCase(), + description: removeTags(param.description), + }) + } + } + return pathItemObject +} + +function groupByUrl(apidocJson) { + return _.chain(apidocJson) + .groupBy("url") + .toPairs() + .map(function (element) { + return _.zipObject(["url", "verbs"], element) + }) + .value() +} + +module.exports = toSwagger diff --git a/packages/server/scripts/integrations/oracle/docker-compose.yml b/packages/server/scripts/integrations/oracle/docker-compose.yml index 5cd5e02f81..c54cd0a40b 100644 --- a/packages/server/scripts/integrations/oracle/docker-compose.yml +++ b/packages/server/scripts/integrations/oracle/docker-compose.yml @@ -4,7 +4,7 @@ version: "3.8" services: db: - container_name: oracle-xe + restart: always platform: linux/x86_64 image: container-registry.oracle.com/database/express:18.4.0-xe environment: diff --git a/packages/server/scripts/integrations/oracle/oracle.md b/packages/server/scripts/integrations/oracle/oracle.md index 912de08b65..6c2d7a9252 100644 --- a/packages/server/scripts/integrations/oracle/oracle.md +++ b/packages/server/scripts/integrations/oracle/oracle.md @@ -84,7 +84,7 @@ The `HR` schema is populated with dummy data by default in oracle for testing pu To connect to the HR schema first update the user password and unlock the account by performing ```sql ALTER USER hr ACCOUNT UNLOCK; -ALTER USER hr IDENTIFIED BY hr +ALTER USER hr IDENTIFIED BY hr; ``` You should now be able to connect to the hr schema using the credentials hr/hr diff --git a/packages/server/src/api/controllers/application.js b/packages/server/src/api/controllers/application.js index ef2913e812..6f55834a49 100644 --- a/packages/server/src/api/controllers/application.js +++ b/packages/server/src/api/controllers/application.js @@ -15,14 +15,12 @@ const { generateAppID, getLayoutParams, getScreenParams, - generateScreenID, generateDevAppID, DocumentTypes, AppStatus, } = require("../../db/utils") const { BUILTIN_ROLE_IDS, AccessController } = require("@budibase/auth/roles") const { BASE_LAYOUTS } = require("../../constants/layouts") -const { createHomeScreen } = require("../../constants/screens") const { cloneDeep } = require("lodash/fp") const { processObject } = require("@budibase/string-templates") const { @@ -409,9 +407,5 @@ const createEmptyAppPackage = async (ctx, app) => { screensAndLayouts.push(await processObject(cloned, app)) } - const homeScreen = createHomeScreen(app) - homeScreen._id = generateScreenID() - screensAndLayouts.push(homeScreen) - await db.bulkDocs(screensAndLayouts) } diff --git a/packages/server/src/api/controllers/query.js b/packages/server/src/api/controllers/query.js index 502ef5e67b..cf6f03f00f 100644 --- a/packages/server/src/api/controllers/query.js +++ b/packages/server/src/api/controllers/query.js @@ -1,8 +1,11 @@ const { processString } = require("@budibase/string-templates") const CouchDB = require("../../db") -const { generateQueryID, getQueryParams } = require("../../db/utils") +const { + generateQueryID, + getQueryParams, + isProdAppID, +} = require("../../db/utils") const { BaseQueryVerbs } = require("../../constants") -const env = require("../../environment") const { Thread, ThreadType } = require("../../threads") const Runner = new Thread(ThreadType.QUERY, { timeoutMs: 10000 }) @@ -90,10 +93,9 @@ exports.find = async function (ctx) { const db = new CouchDB(ctx.appId) const query = enrichQueries(await db.get(ctx.params.queryId)) // remove properties that could be dangerous in real app 
- if (env.isProd()) { + if (isProdAppID(ctx.appId)) { delete query.fields delete query.parameters - delete query.schema } ctx.body = query } diff --git a/packages/server/src/api/controllers/row/ExternalRequest.ts b/packages/server/src/api/controllers/row/ExternalRequest.ts index 2226dc99be..af199561dc 100644 --- a/packages/server/src/api/controllers/row/ExternalRequest.ts +++ b/packages/server/src/api/controllers/row/ExternalRequest.ts @@ -1,4 +1,5 @@ import { + FilterTypes, IncludeRelationships, Operation, PaginationJson, @@ -118,8 +119,13 @@ module External { } // check the row and filters to make sure they aren't a key of some sort if (config.filters) { - for (let filter of Object.values(config.filters)) { - if (typeof filter !== "object" || Object.keys(filter).length === 0) { + for (let [key, filter] of Object.entries(config.filters)) { + // oneOf is an array, don't iterate it + if ( + typeof filter !== "object" || + Object.keys(filter).length === 0 || + key === FilterTypes.ONE_OF + ) { continue } iterateObject(filter) diff --git a/packages/server/src/api/controllers/table/internal.js b/packages/server/src/api/controllers/table/internal.js index 660618aef0..10a5c9746a 100644 --- a/packages/server/src/api/controllers/table/internal.js +++ b/packages/server/src/api/controllers/table/internal.js @@ -44,14 +44,10 @@ exports.save = async function (ctx) { // the column if you want to change the type if (oldTable && oldTable.schema) { for (let propKey of Object.keys(tableToSave.schema)) { - let column = tableToSave.schema[propKey] let oldColumn = oldTable.schema[propKey] if (oldColumn && oldColumn.type === "internal") { oldColumn.type = "auto" } - if (oldColumn && oldColumn.type !== column.type) { - ctx.throw(400, "Cannot change the type of a column") - } } } diff --git a/packages/server/src/api/controllers/table/utils.js b/packages/server/src/api/controllers/table/utils.js index 0623fc64b5..e4086e8071 100644 --- a/packages/server/src/api/controllers/table/utils.js +++ b/packages/server/src/api/controllers/table/utils.js @@ -75,6 +75,7 @@ exports.handleDataImport = async (appId, user, table, dataImport) => { if (!dataImport || !dataImport.csvString) { return table } + const db = new CouchDB(appId) // Populate the table with rows imported from CSV in a bulk update const data = await csvParser.transform({ diff --git a/packages/server/src/api/controllers/user.js b/packages/server/src/api/controllers/user.js index 6dfaf9847d..95e9a9969d 100644 --- a/packages/server/src/api/controllers/user.js +++ b/packages/server/src/api/controllers/user.js @@ -81,7 +81,7 @@ exports.syncUser = async function (ctx) { throw err } } - const roles = user.roles + const roles = deleting ? 
{} : user.roles // remove props which aren't useful to metadata delete user.password delete user.forceResetPassword diff --git a/packages/server/src/api/controllers/view/index.js b/packages/server/src/api/controllers/view/index.js index 3b43ef2408..e3232323bf 100644 --- a/packages/server/src/api/controllers/view/index.js +++ b/packages/server/src/api/controllers/view/index.js @@ -5,6 +5,7 @@ const exporters = require("./exporters") const { saveView, getView, getViews, deleteView } = require("./utils") const { fetchView } = require("../row") const { getTable } = require("../table/utils") +const { FieldTypes } = require("../../../constants") exports.fetch = async ctx => { const db = new CouchDB(ctx.appId) @@ -77,6 +78,7 @@ exports.exportView = async ctx => { } await fetchView(ctx) + let rows = ctx.body let schema = view && view.meta && view.meta.schema if (!schema) { @@ -85,11 +87,23 @@ exports.exportView = async ctx => { schema = table.schema } + // remove any relationships + const relationships = Object.entries(schema) + .filter(entry => entry[1].type === FieldTypes.LINK) + .map(entry => entry[0]) + // iterate relationship columns and remove from and row and schema + relationships.forEach(column => { + rows.forEach(row => { + delete row[column] + }) + delete schema[column] + }) + // make sure no "undefined" entries appear in the CSV if (format === exporters.ExportFormats.CSV) { const schemaKeys = Object.keys(schema) for (let key of schemaKeys) { - for (let row of ctx.body) { + for (let row of rows) { if (row[key] == null) { row[key] = "" } @@ -103,5 +117,5 @@ exports.exportView = async ctx => { const filename = `${viewName}.${format}` // send down the file ctx.attachment(filename) - ctx.body = apiFileReturn(exporter(headers, ctx.body)) + ctx.body = apiFileReturn(exporter(headers, rows)) } diff --git a/packages/server/src/api/routes/row.js b/packages/server/src/api/routes/row.js index 44f71ad545..8251328df3 100644 --- a/packages/server/src/api/routes/row.js +++ b/packages/server/src/api/routes/row.js @@ -14,24 +14,130 @@ const { const router = Router() router + /** + * @api {get} /api/:tableId/:rowId/enrich Get an enriched row + * @apiName Get an enriched row + * @apiGroup rows + * @apiPermission table read access + * @apiDescription This API is only useful when dealing with rows that have relationships. + * Normally when a row is a returned from the API relationships will only have the structure + * `{ primaryDisplay: "name", _id: ... }` but this call will return the full related rows + * for each relationship instead. + * + * @apiParam {string} rowId The ID of the row which is to be retrieved and enriched. + * + * @apiSuccess {object} row The response body will be the enriched row. + */ .get( "/api/:tableId/:rowId/enrich", paramSubResource("tableId", "rowId"), authorized(PermissionTypes.TABLE, PermissionLevels.READ), rowController.fetchEnrichedRow ) + /** + * @api {get} /api/:tableId/rows Get all rows in a table + * @apiName Get all rows in a table + * @apiGroup rows + * @apiPermission table read access + * @apiDescription This is a deprecated endpoint that should not be used anymore, instead use the search endpoint. + * This endpoint gets all of the rows within the specified table - it is not heavily used + * due to its lack of support for pagination. With SQL tables this will retrieve up to a limit and then + * will simply stop. + * + * @apiParam {string} tableId The ID of the table to retrieve all rows within. 
+ * + * @apiSuccess {object[]} rows The response body will be an array of all rows found. + */ .get( "/api/:tableId/rows", paramResource("tableId"), authorized(PermissionTypes.TABLE, PermissionLevels.READ), rowController.fetch ) + /** + * @api {get} /api/:tableId/rows/:rowId Retrieve a single row + * @apiName Retrieve a single row + * @apiGroup rows + * @apiPermission table read access + * @apiDescription This endpoint retrieves only the specified row. If you wish to retrieve + * a row by anything other than its _id field, use the search endpoint. + * + * @apiParam {string} tableId The ID of the table to retrieve a row from. + * @apiParam {string} rowId The ID of the row to retrieve. + * + * @apiSuccess {object} body The response body will be the row that was found. + */ .get( "/api/:tableId/rows/:rowId", paramSubResource("tableId", "rowId"), authorized(PermissionTypes.TABLE, PermissionLevels.READ), rowController.find ) + /** + * @api {post} /api/:tableId/search Search for rows in a table + * @apiName Search for rows in a table + * @apiGroup rows + * @apiPermission table read access + * @apiDescription This is the primary method of accessing rows in Budibase, the data provider + * and data UI in the builder are built atop this. All filtering, sorting and pagination is + * handled through this, for internal and external (datasource plus, e.g. SQL) tables. + * + * @apiParam {string} tableId The ID of the table to retrieve rows from. + * + * @apiParam (Body) {boolean} [paginate] If pagination is required then this should be set to true, + * defaults to false. + * @apiParam (Body) {object} [query] This contains a set of filters which should be applied, if none + * specified then the request will be unfiltered. An example with all of the possible query + * options has been supplied below. + * @apiParam (Body) {number} [limit] This sets a limit for the number of rows that will be returned, + * this will be implemented at the database level if supported for performance reasons. This + * is useful when paginating to set exactly how many rows per page. + * @apiParam (Body) {string} [bookmark] If pagination is enabled then a bookmark will be returned + * with each successful search request, this should be supplied back to get the next page. + * @apiParam (Body) {object} [sort] If sort is desired this should contain the name of the column to + * sort on. + * @apiParam (Body) {string} [sortOrder] If sort is enabled then this can be either "descending" or + * "ascending" as required. + * @apiParam (Body) {string} [sortType] If sort is enabled then you must specify the type of search + * being used, either "string" or "number". This is only used for internal tables. + * + * @apiParamExample {json} Example: + * { + * "tableId": "ta_70260ff0b85c467ca74364aefc46f26d", + * "query": { + * "string": {}, + * "fuzzy": {}, + * "range": { + * "columnName": { + * "high": 20, + * "low": 10, + * } + * }, + * "equal": { + * "columnName": "someValue" + * }, + * "notEqual": {}, + * "empty": {}, + * "notEmpty": {}, + * "contains": {}, + * "notContains": {} + * "oneOf": { + * "columnName": ["value"] + * } + * }, + * "limit": 10, + * "sort": "name", + * "sortOrder": "descending", + * "sortType": "string", + * "paginate": true + * } + * + * @apiSuccess {object[]} rows An array of rows that was found based on the supplied parameters. + * @apiSuccess {boolean} hasNextPage If pagination was enabled then this specifies whether or + * not there is another page after this request. 
+   * @apiSuccess {string} bookmark The bookmark to be sent with the next request to get the next
+   * page.
+   */
  .post(
    "/api/:tableId/search",
    paramResource("tableId"),
@@ -46,6 +152,30 @@ router
    authorized(PermissionTypes.TABLE, PermissionLevels.READ),
    rowController.search
  )
+  /**
+   * @api {post} /api/:tableId/rows Creates a new row
+   * @apiName Creates a new row
+   * @apiGroup rows
+   * @apiPermission table write access
+   * @apiDescription This API will create a new row based on the supplied body. If the
+   * body includes an "_id" field then it will update an existing row if the field
+   * links to one. Please note that "_id", "_rev" and "tableId" are fields that are
+   * already used by Budibase tables and cannot be used for columns.
+   *
+   * @apiParam {string} tableId The ID of the table to save a row to.
+   *
+   * @apiParam (Body) {string} [_id] If the row exists already then an ID for the row must be provided.
+   * @apiParam (Body) {string} [_rev] If working with an existing row for an internal table its revision
+   * must also be provided.
+   * @apiParam (Body) {string} tableId The ID of the table should also be specified in the row body itself.
+   * @apiParam (Body) {any} [any] Any field supplied in the body will be assessed to see if it matches
+   * a column in the specified table. All other fields will be dropped and not stored.
+   *
+   * @apiSuccess {string} _id The ID of the row that was just saved; if it was just created this
+   * is the row's new ID.
+   * @apiSuccess {string} [_rev] If saving to an internal table a revision will also be returned.
+   * @apiSuccess {object} body The contents of the row that was saved will be returned as well.
+   */
  .post(
    "/api/:tableId/rows",
    paramResource("tableId"),
@@ -53,18 +183,66 @@ router
    usage,
    rowController.save
  )
+  /**
+   * @api {patch} /api/:tableId/rows Updates a row
+   * @apiName Update a row
+   * @apiGroup rows
+   * @apiPermission table write access
+   * @apiDescription This endpoint is identical to the row creation endpoint, but it will
+   * error if an _id isn't provided; it only functions for existing rows.
+   */
  .patch(
    "/api/:tableId/rows",
    paramResource("tableId"),
    authorized(PermissionTypes.TABLE, PermissionLevels.WRITE),
    rowController.patch
  )
+  /**
+   * @api {post} /api/:tableId/rows/validate Validate inputs for a row
+   * @apiName Validate inputs for a row
+   * @apiGroup rows
+   * @apiPermission table write access
+   * @apiDescription When attempting to save a row you may want to check if the row is valid
+   * given the table schema; this will iterate through all the constraints on the table and
+   * check if the request body is valid.
+   *
+   * @apiParam {string} tableId The ID of the table the row is to be validated for.
+   *
+   * @apiParam (Body) {any} [any] Any fields provided in the request body will be tested
+   * against the table schema and constraints.
+   *
+   * @apiSuccess {boolean} valid If inputs provided are acceptable within the table schema this
+   * will be true; if not, the errors property will be populated.
+   * @apiSuccess {object} [errors] A key value map of information about fields on the input
+   * which do not match the table schema. The keys will be the names of the columns that have breached
+   * the schema.
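(Aside: the create and validate endpoints documented above are often used together. A hedged sketch of that flow follows - validate a candidate row first, then save it only if no constraint errors come back. The table ID, column names and URL base are placeholders.)

```js
// Illustrative only - table ID and row contents are placeholders.
async function saveRowIfValid(tableId, row) {
  const headers = { "Content-Type": "application/json" }

  // 1. check the row against the table's schema and constraints
  const validation = await fetch(`/api/${tableId}/rows/validate`, {
    method: "POST",
    headers,
    body: JSON.stringify(row),
  }).then(r => r.json())

  if (!validation.valid) {
    // `errors` is keyed by the column names that breached the schema
    throw new Error(`Invalid row: ${JSON.stringify(validation.errors)}`)
  }

  // 2. save the row, including tableId in the body as documented above
  return fetch(`/api/${tableId}/rows`, {
    method: "POST",
    headers,
    body: JSON.stringify({ ...row, tableId }),
  }).then(r => r.json())
}
```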
+   */
  .post(
    "/api/:tableId/rows/validate",
    paramResource("tableId"),
    authorized(PermissionTypes.TABLE, PermissionLevels.WRITE),
    rowController.validate
  )
+  /**
+   * @api {delete} /api/:tableId/rows Delete rows
+   * @apiName Delete rows
+   * @apiGroup rows
+   * @apiPermission table write access
+   * @apiDescription This endpoint can delete a single row, or delete rows in bulk.
+   *
+   * @apiParam {string} tableId The ID of the table the row is to be deleted from.
+   *
+   * @apiParam (Body) {object[]} [rows] If bulk deletion is desired then provide the rows in this
+   * key of the request body that are to be deleted.
+   * @apiParam (Body) {string} [_id] If deleting a single row then provide its ID in this field.
+   * @apiParam (Body) {string} [_rev] If deleting a single row from an internal table then provide its
+   * revision here.
+   *
+   * @apiSuccess {object[]|object} body If deleting in bulk then the response body will be an array
+   * of the deleted rows; if deleting a single row then the body will contain a "row" property which
+   * is the deleted row.
+   */
  .delete(
    "/api/:tableId/rows",
    paramResource("tableId"),
diff --git a/packages/server/src/api/routes/table.js b/packages/server/src/api/routes/table.js
index 62146b0bbe..d8ddbe8133 100644
--- a/packages/server/src/api/routes/table.js
+++ b/packages/server/src/api/routes/table.js
@@ -27,13 +27,110 @@ function generateSaveValidator() {
 }
 router
+  /**
+   * @api {get} /api/tables Fetch all tables
+   * @apiName Fetch all tables
+   * @apiGroup tables
+   * @apiPermission table read access
+   * @apiDescription This endpoint retrieves all of the tables which have been created in
+   * an app. This includes all of the external and internal tables; to tell the difference
+   * between these look for the "type" property on each table, either being "internal" or "external".
+   *
+   * @apiSuccess {object[]} body The response body will be the list of tables that was found - as
+   * this does not take any parameters the only error scenario is no access.
+   */
  .get("/api/tables", authorized(BUILDER), tableController.fetch)
+  /**
+   * @api {get} /api/tables/:id Fetch a single table
+   * @apiName Fetch a single table
+   * @apiGroup tables
+   * @apiPermission table read access
+   * @apiDescription Retrieves a single table; this could be internal or external based on
+   * the provided table ID.
+   *
+   * @apiParam {string} id The ID of the table which is to be retrieved.
+   *
+   * @apiSuccess {object} body The response body will be the table that was found.
+   */
  .get(
    "/api/tables/:id",
    paramResource("id"),
    authorized(PermissionTypes.TABLE, PermissionLevels.READ),
    tableController.find
  )
+  /**
+   * @api {post} /api/tables Save a table
+   * @apiName Save a table
+   * @apiGroup tables
+   * @apiPermission builder
+   * @apiDescription Create or update a table with this endpoint; this will work for both internal
+   * and external tables.
+   *
+   * @apiParam (Body) {string} [_id] If updating an existing table then the ID of the table must be specified.
+   * @apiParam (Body) {string} [_rev] If updating an existing internal table then the revision must also be specified.
+   * @apiParam (Body) {string} [type] This should either be "internal" or "external" depending on the table type -
+   * this will default to internal.
+   * @apiParam (Body) {string} [sourceId] If creating an external table then this should be set to the data source ID. If
+   * building an internal table this does not need to be set, although it will be returned as "bb_internal".
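(Aside: the row deletion endpoint documented above accepts two body shapes. A small hedged sketch of both, with placeholder IDs:)

```js
// Illustrative only - table and row IDs are placeholders.
async function deleteExamples() {
  const headers = { "Content-Type": "application/json" }

  // single row delete: provide _id (and _rev for internal tables)
  await fetch("/api/ta_example/rows", {
    method: "DELETE",
    headers,
    body: JSON.stringify({ _id: "ro_example", _rev: "1-abc" }),
  })

  // bulk delete: provide the rows to remove under the "rows" key
  await fetch("/api/ta_example/rows", {
    method: "DELETE",
    headers,
    body: JSON.stringify({ rows: [{ _id: "ro_1" }, { _id: "ro_2" }] }),
  })
}
```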
+ * @apiParam (Body) {string} name The name of the table, this will be used in the UI. To rename the table simply + * supply the table structure to this endpoint with the name changed. + * @apiParam (Body) {object} schema A key value object which has all of the columns in the table as the keys in this + * object. For each column a "type" and "constraints" must be specified, with some types requiring further information. + * More information about the schema structure can be found in the Typescript definitions. + * @apiParam (Body) {string} [primaryDisplay] The name of the column which should be used when displaying rows + * from this table as relationships. + * @apiParam (Body) {object[]} [indexes] Specifies the search indexes - this is deprecated behaviour with the introduction + * of lucene indexes. This functionality is only available for internal tables. + * @apiParam (Body) {object} [_rename] If a column is to be renamed then the "old" column name should be set in this + * structure, and the "updated", new column name should also be supplied. The schema should also be updated, this field + * lets the server know that a field hasn't just been deleted, that the data has moved to a new name, this will fix + * the rows in the table. This functionality is only available for internal tables. + * @apiParam (Body) {object} [dataImport] When creating an internal table it can be built from a CSV, by using the + * CSV validation endpoint. Send the CSV data to the validation endpoint, then put the results of that call + * into this property, along with the CSV and a table/rows will be built from it. This is not supported when updating + * or for external tables. + * + * @apiParamExample {json} Example: + * { + * "_id": "ta_05541307fa0f4044abee071ca2a82119", + * "_rev": "10-0fbe4e78f69b255d79f1017e2eeef807", + * "type": "internal", + * "views": {}, + * "name": "tableName", + * "schema": { + * "column": { + * "type": "string", + * "constraints": { + * "type": "string", + * "length": { + * "maximum": null + * }, + * "presence": false + * }, + * "name": "column" + * }, + * }, + * "primaryDisplay": "column", + * "indexes": [], + * "sourceId": "bb_internal", + * "_rename": { + * "old": "columnName", + * "updated": "newColumnName", + * }, + * "dataImport": { + * "csvString": "column\nvalue", + * "primaryDisplay": "column", + * "schema": { + * "column": { + * "type": "string" + * } + * } + * } + * } + * + * @apiSuccess {object} table The response body will contain the table structure after being cleaned up and + * saved to the database. + */ .post( "/api/tables", // allows control over updating a table @@ -42,22 +139,68 @@ router generateSaveValidator(), tableController.save ) + /** + * @api {post} /api/tables/csv/validate Validate a CSV for a table + * @apiName Validate a CSV for a table + * @apiGroup tables + * @apiPermission builder + * @apiDescription When creating a new table, or importing a CSV to an existing table the CSV must be validated and + * converted into a Budibase schema; this endpoint does this. + * + * @apiParam (Body) {string} csvString The CSV which is to be validated as a string. + * @apiParam (Body) {object} [schema] When a CSV has been validated it is possible to re-validate after changing the + * type of a field, by default everything will be strings as there is no way to infer types. The returned schema can + * be updated and then returned to the endpoint to re-validate and check if the type will work for the CSV, e.g. + * using a number instead of strings. 
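(Aside: the validate, adjust types, re-validate loop described above can be sketched as follows. The CSV content and the decision to coerce an "age" column to a number are illustrative assumptions, not part of the endpoint contract.)

```js
// Illustrative sketch of the CSV validate -> adjust types -> re-validate loop.
async function buildSchemaFromCSV(csvString) {
  const headers = { "Content-Type": "application/json" }
  const validate = body =>
    fetch("/api/tables/csv/validate", {
      method: "POST",
      headers,
      body: JSON.stringify(body),
    }).then(r => r.json())

  // first pass: every column comes back typed as a string by default
  let { schema } = await validate({ csvString })

  // hypothetical adjustment: treat the "age" column as a number instead
  if (schema.age) {
    schema.age.type = "number"
  }

  // second pass: confirm the CSV still validates with the updated types
  return validate({ csvString, schema })
}
```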
+   * @apiParam (Body) {string} [tableId] If importing data to an existing table this will pull the current table and
+   * remove any fields from the CSV schema which do not exist on the table/don't match the type of the table. When
+   * importing a CSV to an existing table only fields that are present on the table can be imported.
+   *
+   * @apiSuccess {object} schema The response body will contain a "schema" object that represents the schema found for
+   * the CSV - this will be in the same format used for table schemas.
+   */
  .post(
    "/api/tables/csv/validate",
    authorized(BUILDER),
    tableController.validateCSVSchema
  )
+  /**
+   * @api {delete} /api/tables/:tableId/:revId Delete a table
+   * @apiName Delete a table
+   * @apiGroup tables
+   * @apiPermission builder
+   * @apiDescription This endpoint will delete a table and all of its associated data; for this reason it is
+   * quite dangerous - it will work for internal and external tables.
+   *
+   * @apiParam {string} tableId The ID of the table which is to be deleted.
+   * @apiParam {string} [revId] If deleting an internal table then the revision must also be supplied (_rev); for
+   * external tables this can simply be set to anything, e.g. "external".
+   *
+   * @apiSuccess {string} message A message stating that the table was deleted successfully.
+   */
  .delete(
    "/api/tables/:tableId/:revId",
    paramResource("tableId"),
    authorized(BUILDER),
    tableController.destroy
  )
-  // this is currently builder only, but in the future
-  // it could be carried out by an end user in app,
-  // however some thought will need to be had about
-  // implications for automations (triggers)
-  // new trigger type, bulk rows created
+  /**
+   * @api {post} /api/tables/:tableId/import Import CSV to existing table
+   * @apiName Import CSV to existing table
+   * @apiGroup tables
+   * @apiPermission builder
+   * @apiDescription This endpoint will import data to existing tables, internal or external. It is used in combination
+   * with the CSV validation endpoint. Take the output of the CSV validation endpoint and pass it to this endpoint to
+   * import the data; please note this will only import fields that already exist on the table/match the type.
+   *
+   * @apiParam {string} tableId The ID of the table which the data should be imported to.
+   *
+   * @apiParam (Body) {object} dataImport This is the same as the structure used when creating an internal table with
+   * a CSV; it will have the "schema" returned from the CSV validation endpoint and the "csvString" which is to be
+   * turned into rows.
+   *
+   * @apiSuccess {string} message A message stating that the data was imported successfully.
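(Aside: combining the CSV validation and import endpoints documented above gives the full "import into an existing table" flow. A hedged sketch, with a placeholder table ID and CSV content:)

```js
// Illustrative sketch of importing a CSV into an existing table.
async function importCSV(tableId, csvString) {
  const headers = { "Content-Type": "application/json" }

  // validate against the existing table so non-matching columns are dropped
  const { schema } = await fetch("/api/tables/csv/validate", {
    method: "POST",
    headers,
    body: JSON.stringify({ csvString, tableId }),
  }).then(r => r.json())

  // hand the validated schema plus the raw CSV to the import endpoint
  return fetch(`/api/tables/${tableId}/import`, {
    method: "POST",
    headers,
    body: JSON.stringify({ dataImport: { csvString, schema } }),
  }).then(r => r.json())
}
```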
+ */ .post( "/api/tables/:tableId/import", paramResource("tableId"), diff --git a/packages/server/src/api/routes/tests/application.spec.js b/packages/server/src/api/routes/tests/application.spec.js index 05e0bc231b..ce1cb80435 100644 --- a/packages/server/src/api/routes/tests/application.spec.js +++ b/packages/server/src/api/routes/tests/application.spec.js @@ -75,7 +75,6 @@ describe("/applications", () => { .expect("Content-Type", /json/) .expect(200) // should have empty packages - expect(res.body.screens.length).toEqual(1) expect(res.body.layouts.length).toEqual(2) }) }) @@ -88,7 +87,6 @@ describe("/applications", () => { .expect("Content-Type", /json/) .expect(200) expect(res.body.application).toBeDefined() - expect(res.body.screens.length).toEqual(1) expect(res.body.layouts.length).toEqual(2) }) }) diff --git a/packages/server/src/api/routes/tests/query.spec.js b/packages/server/src/api/routes/tests/query.spec.js index 6b9be7447f..37c969aba8 100644 --- a/packages/server/src/api/routes/tests/query.spec.js +++ b/packages/server/src/api/routes/tests/query.spec.js @@ -1,6 +1,13 @@ -// mock out postgres for this +// Mock out postgres for this jest.mock("pg") +// Mock isProdAppID to we can later mock the implementation and pretend we are +// using prod app IDs +const authDb = require("@budibase/auth/db") +const { isProdAppID } = authDb +const mockIsProdAppID = jest.fn(isProdAppID) +authDb.isProdAppID = mockIsProdAppID + const setup = require("./utilities") const { checkBuilderEndpoint } = require("./utilities/TestFunctions") const { basicQuery, basicDatasource } = setup.structures @@ -19,10 +26,12 @@ describe("/queries", () => { }) async function createInvalidIntegration() { - const datasource = await config.createDatasource({datasource: { - ...basicDatasource().datasource, - source: "INVALID_INTEGRATION", - }}) + const datasource = await config.createDatasource({ + datasource: { + ...basicDatasource().datasource, + source: "INVALID_INTEGRATION", + }, + }) const query = await config.createQuery() return { datasource, query } } @@ -96,11 +105,32 @@ describe("/queries", () => { .set(await config.defaultHeaders()) .expect(200) .expect("Content-Type", /json/) - expect(res.body.fields).toBeUndefined() - expect(res.body.parameters).toBeUndefined() - expect(res.body.schema).toBeUndefined() + expect(res.body.fields).toBeDefined() + expect(res.body.parameters).toBeDefined() + expect(res.body.schema).toBeDefined() }) }) + + it("should remove sensitive info for prod apps", async () => { + // Mock isProdAppID to pretend we are using a prod app + mockIsProdAppID.mockClear() + mockIsProdAppID.mockImplementation(() => true) + + const query = await config.createQuery() + const res = await request + .get(`/api/queries/${query._id}`) + .set(await config.defaultHeaders()) + .expect("Content-Type", /json/) + .expect(200) + expect(res.body._id).toEqual(query._id) + expect(res.body.fields).toBeUndefined() + expect(res.body.parameters).toBeUndefined() + expect(res.body.schema).toBeDefined() + + // Reset isProdAppID mock + expect(mockIsProdAppID).toHaveBeenCalledTimes(1) + mockIsProdAppID.mockImplementation(isProdAppID) + }) }) describe("destroy", () => { diff --git a/packages/server/src/api/routes/tests/screen.spec.js b/packages/server/src/api/routes/tests/screen.spec.js index 6a47b78853..c2d60ec006 100644 --- a/packages/server/src/api/routes/tests/screen.spec.js +++ b/packages/server/src/api/routes/tests/screen.spec.js @@ -21,7 +21,7 @@ describe("/screens", () => { .set(config.defaultHeaders()) 
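(Aside: the `isProdAppID` mocking pattern introduced in the query tests above generalises - wrap the real function in `jest.fn`, reassign it on the module, then swap implementations per test. A minimal, self-contained sketch of that pattern; the `db` object here is a stand-in for the real `@budibase/auth/db` module.)

```js
// Minimal sketch of the wrap-and-swap mocking pattern used above.
// `db` stands in for the real module; the ID check is a simplified placeholder.
const db = {
  isProdAppID: appId => !appId.startsWith("app_dev"),
}
const realIsProdAppID = db.isProdAppID

// wrap the real implementation so default behaviour is unchanged
const mockIsProdAppID = jest.fn(realIsProdAppID)
db.isProdAppID = mockIsProdAppID

test("pretends to be a prod app", () => {
  // force the prod branch for this test only
  mockIsProdAppID.mockImplementation(() => true)
  expect(db.isProdAppID("app_dev_123")).toBe(true)
  expect(mockIsProdAppID).toHaveBeenCalledTimes(1)
  // restore the real behaviour for later tests
  mockIsProdAppID.mockImplementation(realIsProdAppID)
})
```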
.expect("Content-Type", /json/) .expect(200) - expect(res.body.length).toEqual(2) + expect(res.body.length).toEqual(1) expect(res.body.some(s => s._id === screen._id)).toEqual(true) }) diff --git a/packages/server/src/definitions/datasource.ts b/packages/server/src/definitions/datasource.ts index af56dc2368..6711e7c391 100644 --- a/packages/server/src/definitions/datasource.ts +++ b/packages/server/src/definitions/datasource.ts @@ -55,6 +55,17 @@ export enum IncludeRelationships { EXCLUDE = 0, } +export enum FilterTypes { + STRING = "string", + FUZZY = "fuzzy", + RANGE = "range", + EQUAL = "equal", + NOT_EQUAL = "notEqual", + EMPTY = "empty", + NOT_EMPTY = "notEmpty", + ONE_OF = "oneOf", +} + export interface QueryDefinition { type: QueryTypes displayName?: string diff --git a/packages/server/src/integrations/base/sql.ts b/packages/server/src/integrations/base/sql.ts index 471774db3d..df4f2d511b 100644 --- a/packages/server/src/integrations/base/sql.ts +++ b/packages/server/src/integrations/base/sql.ts @@ -93,7 +93,7 @@ class InternalBuilder { if (filters.oneOf) { iterate(filters.oneOf, (key, array) => { const fnc = allOr ? "orWhereIn" : "whereIn" - query = query[fnc](key, array) + query = query[fnc](key, Array.isArray(array) ? array : [array]) }) } if (filters.string) { @@ -435,8 +435,6 @@ class SqlQueryBuilder extends SqlTableQueryBuilder { id = results?.[0].id } else if (sqlClient === SqlClients.MY_SQL) { id = results?.insertId - } else if (sqlClient === SqlClients.ORACLE) { - id = response.outBinds[0][0] } row = processFn( await this.getReturningRow(queryFn, this.checkLookupKeys(id, json)) diff --git a/packages/server/src/integrations/mysql.ts b/packages/server/src/integrations/mysql.ts index 98584e04d2..24a55a273d 100644 --- a/packages/server/src/integrations/mysql.ts +++ b/packages/server/src/integrations/mysql.ts @@ -143,7 +143,7 @@ module MySQLModule { const schema: TableSchema = {} const descResp = await internalQuery( this.client, - { sql: `DESCRIBE ${tableName};` }, + { sql: `DESCRIBE \`${tableName}\`;` }, false ) for (let column of descResp) { diff --git a/packages/server/src/integrations/oracle.ts b/packages/server/src/integrations/oracle.ts index 13658399db..afaa902655 100644 --- a/packages/server/src/integrations/oracle.ts +++ b/packages/server/src/integrations/oracle.ts @@ -348,27 +348,7 @@ module OracleModule { this.schemaErrors = final.errors } - /** - * Knex default returning behaviour does not work with oracle - * Manually add the behaviour for the return column - */ - private addReturning( - query: SqlQuery, - bindings: BindParameters, - returnColumn: string - ) { - if (bindings instanceof Array) { - bindings.push({ dir: oracledb.BIND_OUT }) - query.sql = - query.sql + ` returning \"${returnColumn}\" into :${bindings.length}` - } - } - - private async internalQuery
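(Aside: the `oneOf` change in `sql.ts` above guards against a single value being passed where knex's `whereIn` expects an array. A small hedged sketch of the same coercion, using knex purely as a query builder with no database connection; the table and column names are placeholders.)

```js
// Hedged sketch of the whereIn coercion from the SQL builder change above.
const knex = require("knex")({ client: "pg" })

function applyOneOf(query, filters, allOr = false) {
  const fnc = allOr ? "orWhereIn" : "whereIn"
  for (const [key, value] of Object.entries(filters.oneOf || {})) {
    // whereIn expects an array; wrap single values so a filter like
    // { oneOf: { country: "UK" } } does not produce invalid SQL
    query = query[fnc](key, Array.isArray(value) ? value : [value])
  }
  return query
}

// usage: both calls produce a valid IN (...) clause
const q1 = applyOneOf(knex("people"), { oneOf: { country: ["UK", "IE"] } })
const q2 = applyOneOf(knex("people"), { oneOf: { country: "UK" } })
console.log(q1.toString()) // ... where "country" in ('UK', 'IE')
console.log(q2.toString()) // ... where "country" in ('UK')
```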