Merge branch 'develop' into cypress-testing
commit 6ffa6853e2
@@ -0,0 +1,13 @@
#!/bin/bash
CUSTOM_DOMAIN="$1"

if [[ ! -z "${CUSTOM_DOMAIN}" ]]; then
  certbot certonly --webroot --webroot-path="/var/www/html" \
    --register-unsafely-without-email \
    --domains $CUSTOM_DOMAIN \
    --rsa-key-size 4096 \
    --agree-tos \
    --force-renewal

  nginx -s reload
fi
@@ -0,0 +1,24 @@
#!/bin/bash
CUSTOM_DOMAIN="$1"
# Request a certificate from Let's Encrypt
certbot certonly --webroot --webroot-path="/var/www/html" \
  --register-unsafely-without-email \
  --domains $CUSTOM_DOMAIN \
  --rsa-key-size 4096 \
  --agree-tos \
  --force-renewal

if (($? != 0)); then
  echo "ERROR: certbot request failed for $CUSTOM_DOMAIN, use http on port 80 - exiting"
  nginx -s stop
  exit 1
else
  cp /app/letsencrypt/options-ssl-nginx.conf /etc/letsencrypt/options-ssl-nginx.conf
  cp /app/letsencrypt/ssl-dhparams.pem /etc/letsencrypt/ssl-dhparams.pem
  cp /app/letsencrypt/nginx-ssl.conf /etc/nginx/sites-available/nginx-ssl.conf
  sed -i "s/CUSTOM_DOMAIN/$CUSTOM_DOMAIN/g" /etc/nginx/sites-available/nginx-ssl.conf
  ln -s /etc/nginx/sites-available/nginx-ssl.conf /etc/nginx/sites-enabled/nginx-ssl.conf

  echo "INFO: restart nginx after certbot request"
  nginx -s reload
fi
@@ -0,0 +1,94 @@
server {
  listen 443 ssl default_server;
  listen [::]:443 ssl default_server;
  server_name _;
  ssl_certificate /etc/letsencrypt/live/CUSTOM_DOMAIN/fullchain.pem;
  ssl_certificate_key /etc/letsencrypt/live/CUSTOM_DOMAIN/privkey.pem;
  include /etc/letsencrypt/options-ssl-nginx.conf;
  ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
  client_max_body_size 1000m;
  ignore_invalid_headers off;
  proxy_buffering off;
  # port_in_redirect off;

  location ^~ /.well-known/acme-challenge/ {
    default_type "text/plain";
    root /var/www/html;
    break;
  }
  location = /.well-known/acme-challenge/ {
    return 404;
  }

  location /app {
    proxy_pass http://127.0.0.1:4001;
  }

  location = / {
    proxy_pass http://127.0.0.1:4001;
  }

  location ~ ^/(builder|app_) {
    proxy_http_version 1.1;
    proxy_set_header Connection $connection_upgrade;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_pass http://127.0.0.1:4001;
  }

  location ~ ^/api/(system|admin|global)/ {
    proxy_pass http://127.0.0.1:4002;
  }

  location /worker/ {
    proxy_pass http://127.0.0.1:4002;
    rewrite ^/worker/(.*)$ /$1 break;
  }

  location /api/ {
    # calls to the API are rate limited with bursting
    limit_req zone=ratelimit burst=20 nodelay;

    # 120s timeout on API requests
    proxy_read_timeout 120s;
    proxy_connect_timeout 120s;
    proxy_send_timeout 120s;

    proxy_http_version 1.1;
    proxy_set_header Connection $connection_upgrade;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    proxy_pass http://127.0.0.1:4001;
  }

  location /db/ {
    proxy_pass http://127.0.0.1:5984;
    rewrite ^/db/(.*)$ /$1 break;
  }

  location / {
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;

    proxy_connect_timeout 300;
    proxy_http_version 1.1;
    proxy_set_header Connection "";
    chunked_transfer_encoding off;
    proxy_pass http://127.0.0.1:9000;
  }

  client_header_timeout 60;
  client_body_timeout 60;
  keepalive_timeout 60;

  # gzip
  gzip on;
  gzip_vary on;
  gzip_proxied any;
  gzip_comp_level 6;
  gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml;
}
@@ -0,0 +1,13 @@
# This file contains important security parameters. If you modify this file
# manually, Certbot will be unable to automatically provide future security
# updates. Instead, Certbot will print and log an error message with a path to
# the up-to-date file that you will need to refer to when manually updating
# this file.

ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;

ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;

ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
@@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----
@@ -0,0 +1,33 @@
#!/usr/bin/env bash
healthy=true

if [[ $(curl -Lfk -s -w "%{http_code}\n" http://localhost/ -o /dev/null) -ne 200 ]]; then
  echo 'ERROR: Budibase is not running';
  healthy=false
fi

if [[ $(curl -s -w "%{http_code}\n" http://localhost:4001/health -o /dev/null) -ne 200 ]]; then
  echo 'ERROR: Budibase backend is not running';
  healthy=false
fi

if [[ $(curl -s -w "%{http_code}\n" http://localhost:4002/health -o /dev/null) -ne 200 ]]; then
  echo 'ERROR: Budibase worker is not running';
  healthy=false
fi

if [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/ -o /dev/null) -ne 200 ]]; then
  echo 'ERROR: CouchDB is not running';
  healthy=false
fi
if [[ $(redis-cli -a $REDIS_PASSWORD --no-auth-warning ping) != 'PONG' ]]; then
  echo 'ERROR: Redis is down';
  healthy=false
fi
# TODO: also check minio, clouseau

if [ $healthy == true ]; then
  exit 0
else
  exit 1
fi
@@ -1,7 +1,7 @@
FROM node:14-slim as build

# install node-gyp dependencies
RUN apt-get update && apt-get install -y --no-install-recommends g++ make python
RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends apt-utils cron g++ make python

# add pin script
WORKDIR /
@@ -20,32 +20,36 @@ RUN node /pinVersions.js && yarn && yarn build && /cleanup.sh

FROM couchdb:3.2.1

ARG TARGETARCH amd64

COPY --from=build /app /app
COPY --from=build /worker /worker

ENV DEPLOYMENT_ENVIRONMENT=docker \
    POSTHOG_TOKEN=phc_fg5I3nDOf6oJVMHSaycEhpPdlgS8rzXG2r6F2IpxCHS \
ENV \
    APP_PORT=4001 \
    ARCHITECTURE=amd \
    BUDIBASE_ENVIRONMENT=PRODUCTION \
    CLUSTER_PORT=80 \
    COUCHDB_PASSWORD=budibase \
    COUCHDB_USER=budibase \
    COUCH_DB_URL=http://budibase:budibase@localhost:5984 \
    BUDIBASE_ENVIRONMENT=PRODUCTION \
    MINIO_URL=http://localhost:9000 \
    REDIS_URL=localhost:6379 \
    WORKER_URL=http://localhost:4002 \
    CUSTOM_DOMAIN=budi001.custom.com \
    DEPLOYMENT_ENVIRONMENT=docker \
    INTERNAL_API_KEY=budibase \
    JWT_SECRET=testsecret \
    MINIO_ACCESS_KEY=budibase \
    MINIO_SECRET_KEY=budibase \
    SELF_HOSTED=1 \
    CLUSTER_PORT=10000 \
    MINIO_URL=http://localhost:9000 \
    POSTHOG_TOKEN=phc_fg5I3nDOf6oJVMHSaycEhpPdlgS8rzXG2r6F2IpxCHS \
    REDIS_PASSWORD=budibase \
    ARCHITECTURE=amd \
    APP_PORT=4001 \
    WORKER_PORT=4002
    REDIS_URL=localhost:6379 \
    SELF_HOSTED=1 \
    WORKER_PORT=4002 \
    WORKER_URL=http://localhost:4002

# install base dependencies
RUN apt-get update && \
    apt-get install software-properties-common wget -y && \
    apt-get install -y software-properties-common wget nginx && \
    apt-add-repository 'deb http://security.debian.org/debian-security stretch/updates main' && \
    apt-get update
@@ -53,20 +57,19 @@ RUN apt-get update && \
WORKDIR /nodejs
RUN curl -sL https://deb.nodesource.com/setup_16.x -o /tmp/nodesource_setup.sh && \
    bash /tmp/nodesource_setup.sh && \
    apt-get install libaio1 nodejs nginx openjdk-8-jdk redis-server unzip -y && \
    apt-get install -y libaio1 nodejs nginx openjdk-8-jdk redis-server unzip && \
    npm install --global yarn pm2

# setup nginx
ADD hosting/single/nginx.conf /etc/nginx
RUN mkdir /etc/nginx/logs && \
    useradd www && \
    touch /etc/nginx/logs/error.log && \
    touch /etc/nginx/logs/nginx.pid
RUN mkdir -p /var/log/nginx && \
    touch /var/log/nginx/error.log && \
    touch /var/run/nginx.pid

WORKDIR /
RUN mkdir -p scripts/integrations/oracle
ADD packages/server/scripts/integrations/oracle scripts/integrations/oracle
RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/x86-64/install.sh
RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh

# setup clouseau
WORKDIR /
@@ -87,20 +90,41 @@ ADD hosting/single/vm.args ./etc/

# setup minio
WORKDIR /minio
RUN wget https://dl.min.io/server/minio/release/linux-${ARCHITECTURE}64/minio && chmod +x minio
ADD scripts/install-minio.sh ./install.sh
RUN chmod +x install.sh && ./install.sh

# setup runner file
WORKDIR /
ADD hosting/single/runner.sh .
RUN chmod +x ./runner.sh
ADD hosting/scripts/healthcheck.sh .
RUN chmod +x ./healthcheck.sh

# cleanup cache
RUN yarn cache clean -f

EXPOSE 10000
EXPOSE 80
EXPOSE 443
VOLUME /opt/couchdb/data
VOLUME /minio

# setup letsencrypt certificate
RUN apt-get install -y certbot python3-certbot-nginx
ADD hosting/letsencrypt /app/letsencrypt
RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh
# Remove cached files
RUN rm -rf \
    /root/.cache \
    /root/.npm \
    /root/.pip \
    /usr/local/share/doc \
    /usr/share/doc \
    /usr/share/man \
    /var/lib/apt/lists/* \
    /tmp/*

HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh"

# must set this just before running
ENV NODE_ENV=production
WORKDIR /
@@ -0,0 +1,105 @@
# Docker Single Image for Budibase

## Overview
As an alternative to running several docker containers via docker-compose, the files under ./hosting/single can be used to build a docker image containing all of the Budibase components (minio, couch, clouseau etc).
We call this the 'single image' container as the Dockerfile adds all the components to a single docker image.


## Usage

- Amend Environment Variables
- Build Requirements
- Build the Image
- Run the Container

### Amend Environment Variables

Edit the Dockerfile in this directory, amending the environment variables to suit your usage. Pay particular attention to changing passwords.
The CUSTOM_DOMAIN variable will be used to request a certificate from Let's Encrypt and, if successful, you can point traffic to port 443. If you choose to use the CUSTOM_DOMAIN variable, ensure that the DNS for your custom domain points to the public IP address where you are running Budibase - otherwise the certificate issuance will fail.
If you have other arrangements for a proxy in front of the single image container, you can omit the CUSTOM_DOMAIN environment variable and the request to Let's Encrypt will be skipped. You can then point traffic to port 80.
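If you prefer not to edit the Dockerfile, the same variables can generally be overridden when starting the container using Docker's standard `-e` flags. A minimal sketch (the domain and passwords below are placeholder assumptions, not shipped defaults):
```
docker run -d \
  -p 80:80 -p 443:443 \
  -e CUSTOM_DOMAIN=budibase.example.com \
  -e COUCHDB_PASSWORD=change-me \
  -e REDIS_PASSWORD=change-me \
  -e MINIO_SECRET_KEY=change-me \
  --name budibase budibase:latest
```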

### Build Requirements
We would suggest building the image with 6GB of RAM and 20GB of free disk space for build artifacts. The resulting image will use approximately 2GB of disk space.
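Before starting a build it can be worth confirming the host actually has that much headroom; a quick check with standard tools (no Budibase-specific assumptions):
```
free -h   # expect roughly 6GB of available memory
df -h .   # expect roughly 20GB free on the build volume
```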

### Build the Image
The guidance below is based on building the Budibase single image on Debian 11. If you use another distro or OS you will need to amend the commands to suit.
Install Node:
Budibase requires a more recent version of node (14+) than is available in the base Debian repos, so:

```
curl -sL https://deb.nodesource.com/setup_16.x | sudo bash -
apt install -y nodejs
node -v
```
Install yarn and lerna:
```
npm install -g yarn jest lerna
```
Install Docker:
```
apt install -y docker.io
apt install -y python3-pip
pip3 install docker-compose
```
Check the version of each installed component; this process was tested with the version numbers below (the commands after the list will print them), so YMMV using anything else:

- Docker: 20.10.5
- docker-compose: 1.29.2
- node: 16.15.1
- yarn: 1.22.19
- lerna: 5.1.4
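The version of each tool can be printed with the following commands (assuming everything above was installed onto your PATH):
```
docker --version
docker-compose --version
node -v
yarn -v
lerna -v
```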

Clone the Budibase repo
```
git clone https://github.com/Budibase/budibase.git
cd budibase
```
Node setup:
```
node ./hosting/scripts/setup.js
yarn
yarn bootstrap
yarn build
```

Build the image from the Dockerfile:

```
yarn build:docker:single
```
If the docker build step fails, run that step again manually with:
```
docker build --no-cache -t budibase:latest -f ./hosting/single/Dockerfile .
```

### Run the Container
```
docker run -d -p 80:80 -p 443:443 --name budibase budibase:latest
```
Where:
- -d runs the container in detached mode
- -p forwards ports from your host to the ports inside the container. If you are already using port 80 on your host for something else, you can try running with an alternative port, e.g. `-p 8080:80` (see the example after this list)
- --name is the name for the container as shown in `docker ps` and can be used with other docker commands, e.g. `docker restart budibase`
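For example, a run that avoids clashing with an existing web server on the host (the 8080/8443 host ports are just an illustration):
```
docker run -d -p 8080:80 -p 8443:443 --name budibase budibase:latest
```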

When the container runs you should be able to access the container over http at your host address, e.g. http://1.2.3.4/, or using your custom domain, e.g. https://my.custom.domain/

When the Budibase UI appears you will be prompted to create an account to get started.

### Check
There are many things that could go wrong, so if your container is not building or running as expected please check the following before opening a support issue.
Verify the healthcheck status of the container:
```
docker ps
```
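For more detail than the STATUS column of `docker ps`, Docker's health log for the container can be inspected directly (standard Docker CLI; assumes the container is named `budibase` as above):
```
docker inspect --format '{{json .State.Health}}' budibase
```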
Check the container logs:
```
docker logs budibase
```


### Support
This single image build is still a work in progress, so if you open an issue please provide the following information:
- The OS and OS version you are building on
- The versions you are using of docker, docker-compose, yarn, node, lerna
- For build errors please provide zipped output
- For container errors please provide zipped container logs
@@ -1,6 +1,6 @@
user www www;
error_log /etc/nginx/logs/error.log;
pid /etc/nginx/logs/nginx.pid;
user www-data www-data;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
worker_processes auto;
worker_rlimit_nofile 8192;
@@ -33,14 +33,23 @@ http {
  }

  server {
    listen 10000 default_server;
    listen [::]:10000 default_server;
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    client_max_body_size 1000m;
    ignore_invalid_headers off;
    proxy_buffering off;
    # port_in_redirect off;

    location ^~ /.well-known/acme-challenge/ {
      default_type "text/plain";
      root /var/www/html;
      break;
    }
    location = /.well-known/acme-challenge/ {
      return 404;
    }

    location /app {
      proxy_pass http://127.0.0.1:4001;
    }
@@ -2,6 +2,15 @@ redis-server --requirepass $REDIS_PASSWORD &
/opt/clouseau/bin/clouseau &
/minio/minio server /minio &
/docker-entrypoint.sh /opt/couchdb/bin/couchdb &
/etc/init.d/nginx restart
if [[ ! -z "${CUSTOM_DOMAIN}" ]]; then
  # Add monthly cron job to renew certbot certificate
  echo "0 0 2 * * root exec /app/letsencrypt/certificate-renew.sh ${CUSTOM_DOMAIN}" >> /etc/cron.d/certificate-renew
  chmod +x /etc/cron.d/certificate-renew
  # Request the certbot certificate
  /app/letsencrypt/certificate-request.sh ${CUSTOM_DOMAIN}
fi

/etc/init.d/nginx restart
pushd app
pm2 start --name app "yarn run:docker"
@@ -10,7 +19,6 @@ pushd worker
pm2 start --name worker "yarn run:docker"
popd
sleep 10
URL=http://${COUCHDB_USER}:${COUCHDB_PASSWORD}@localhost:5984
curl -X PUT ${URL}/_users
curl -X PUT ${URL}/_replicator
sleep infinity
curl -X PUT ${COUCH_DB_URL}/_users
curl -X PUT ${COUCH_DB_URL}/_replicator
sleep infinity
@@ -62,6 +62,7 @@
"build:docker:develop": "node scripts/pinVersions && lerna run build:docker && npm run build:docker:proxy:compose && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh develop && cd -",
"build:docker:airgap": "node hosting/scripts/airgapped/airgappedDockerBuild",
"build:digitalocean": "cd hosting/digitalocean && ./build.sh && cd -",
"build:docker:single:multiarch": "docker buildx build --platform linux/arm64,linux/amd64 -f hosting/single/Dockerfile -t budibase:latest .",
"build:docker:single:image": "docker build -f hosting/single/Dockerfile -t budibase:latest .",
"build:docker:single": "lerna run build && lerna run predocker && npm run build:docker:single:image",
"build:docs": "lerna run build:docs",
@@ -190,6 +190,7 @@ export const getFrontendStore = () => {

// Build array of promises to speed up bulk deletions
const promises = []
let deleteUrls = []
screensToDelete.forEach(screen => {
// Delete the screen
promises.push(
@@ -199,14 +200,10 @@ export const getFrontendStore = () => {
})
)
// Remove links to this screen
promises.push(
store.actions.components.links.delete(
screen.routing.route,
screen.props._instanceName
)
)
deleteUrls.push(screen.routing.route)
})

promises.push(store.actions.links.delete(deleteUrls))
await Promise.all(promises)
const deletedIds = screensToDelete.map(screen => screen._id)
store.update(state => {
@@ -578,89 +575,38 @@ export const getFrontendStore = () => {
})
await store.actions.preview.saveSelected()
},
links: {
save: async (url, title) => {
const layout = get(mainLayout)
if (!layout) {
return
}
},
links: {
save: async (url, title) => {
const layout = get(mainLayout)
if (!layout) {
return
}

// Add link setting to main layout
if (layout.props._component.endsWith("layout")) {
// If using a new SDK, add to the layout component settings
if (!layout.props.links) {
layout.props.links = []
}
layout.props.links.push({
text: title,
url,
})
} else {
// If using an old SDK, add to the navigation component
// TODO: remove this when we can assume everyone has updated
const nav = findComponentType(
layout.props,
"@budibase/standard-components/navigation"
)
if (!nav) {
return
}
// Add link setting to main layout
if (!layout.props.links) {
layout.props.links = []
}
layout.props.links.push({
text: title,
url,
})

let newLink
if (nav._children && nav._children.length) {
// Clone an existing link if one exists
newLink = cloneDeep(nav._children[0])
await store.actions.layouts.save(layout)
},
delete: async urls => {
const layout = get(mainLayout)
if (!layout?.props.links?.length) {
return
}

// Set our new props
newLink._id = Helpers.uuid()
newLink._instanceName = `${title} Link`
newLink.url = url
newLink.text = title
} else {
// Otherwise create vanilla new link
newLink = {
...store.actions.components.createInstance("link"),
url,
text: title,
_instanceName: `${title} Link`,
}
nav._children = [...nav._children, newLink]
}
}
// Filter out the URLs to delete
urls = Array.isArray(urls) ? urls : [urls]
layout.props.links = layout.props.links.filter(
link => !urls.includes(link.url)
)

// Save layout
await store.actions.layouts.save(layout)
},
delete: async (url, title) => {
const layout = get(mainLayout)
if (!layout) {
return
}

// Add link setting to main layout
if (layout.props._component.endsWith("layout")) {
// If using a new SDK, add to the layout component settings
layout.props.links = layout.props.links.filter(
link => !(link.text === title && link.url === url)
)
} else {
// If using an old SDK, add to the navigation component
// TODO: remove this when we can assume everyone has updated
const nav = findComponentType(
layout.props,
"@budibase/standard-components/navigation"
)
if (!nav) {
return
}

nav._children = nav._children.filter(
child => !(child.url === url && child.text === title)
)
}
// Save layout
await store.actions.layouts.save(layout)
},
await store.actions.layouts.save(layout)
},
},
settings: {
@@ -15,7 +15,7 @@ export default function (tables) {
name: `${table.name} - New`,
create: () => createScreen(table),
id: NEW_ROW_TEMPLATE,
table: table.name,
table: table._id,
}
})
}
@@ -17,7 +17,7 @@ export default function (tables) {
name: `${table.name} - Detail`,
create: () => createScreen(table),
id: ROW_DETAIL_TEMPLATE,
table: table.name,
table: table._id,
}
})
}
@@ -10,7 +10,7 @@ export default function (tables) {
name: `${table.name} - List`,
create: () => createScreen(table),
id: ROW_LIST_TEMPLATE,
table: table.name,
table: table._id,
}
})
}
@@ -14,14 +14,14 @@
let selectedScreens = [...initalScreens]

const toggleScreenSelection = (table, datasource) => {
if (selectedScreens.find(s => s.table === table.name)) {
if (selectedScreens.find(s => s.table === table._id)) {
selectedScreens = selectedScreens.filter(
screen => screen.table !== table.name
screen => screen.table !== table._id
)
} else {
let partialTemplates = getTemplates($store, $tables.list).reduce(
(acc, template) => {
if (template.table === table.name) {
if (template.table === table._id) {
template.datasource = datasource.name
acc.push(template)
}
@@ -88,7 +88,7 @@
<div
class="data-source-entry"
class:selected={selectedScreens.find(
x => x.table === table.name
x => x.table === table._id
)}
on:click={() => toggleScreenSelection(table, datasource)}
>
@@ -102,8 +102,7 @@
<use xlink:href="#spectrum-icon-18-Table" />
</svg>
{table.name}

{#if selectedScreens.find(x => x.table === table.name)}
{#if selectedScreens.find(x => x.table === table._id)}
<span class="data-source-check">
<Icon size="S" name="CheckmarkCircle" />
</span>
@@ -116,7 +115,7 @@
<div
class="data-source-entry"
class:selected={selectedScreens.find(
x => x.table === datasource.entities[table_key].name
x => x.table === datasource.entities[table_key]._id
)}
on:click={() =>
toggleScreenSelection(
@@ -134,8 +133,7 @@
<use xlink:href="#spectrum-icon-18-Table" />
</svg>
{datasource.entities[table_key].name}

{#if selectedScreens.find(x => x.table === datasource.entities[table_key].name)}
{#if selectedScreens.find(x => x.table === datasource.entities[table_key]._id)}
<span class="data-source-check">
<Icon size="S" name="CheckmarkCircle" />
</span>
@@ -66,7 +66,7 @@

// Add link in layout for list screens
if (screen.props._instanceName.endsWith("List")) {
await store.actions.components.links.save(
await store.actions.links.save(
screen.routing.route,
screen.routing.route.split("/")[1]
)
@@ -131,6 +131,7 @@
const screens = selectedTemplates.map(template => {
let screenTemplate = template.create()
screenTemplate.datasource = template.datasource
screenTemplate.autoTableId = template.table
return screenTemplate
})
await createScreens({ screens, screenAccessRole })
@@ -1,27 +1,18 @@
<script>
import { Label, Select, Body } from "@budibase/bbui"
import { findAllMatchingComponents } from "builderStore/componentUtils"
import { Label, Select, Body, Multiselect } from "@budibase/bbui"
import {
findAllMatchingComponents,
findComponent,
} from "builderStore/componentUtils"
import { currentAsset } from "builderStore"
import { onMount } from "svelte"
import {
getDatasourceForProvider,
getSchemaForDatasource,
} from "builderStore/dataBinding"

export let parameters

$: tables = findAllMatchingComponents($currentAsset?.props, component =>
component._component.endsWith("table")
).map(table => ({
label: table._instanceName,
value: table._id,
}))

$: tableBlocks = findAllMatchingComponents($currentAsset?.props, component =>
component._component.endsWith("tableblock")
).map(block => ({
label: block._instanceName,
value: `${block._id}-table`,
}))

$: componentOptions = tables.concat(tableBlocks)

const FORMATS = [
{
label: "CSV",
@@ -33,6 +24,32 @@
},
]

$: tables = findAllMatchingComponents($currentAsset?.props, component =>
component._component.endsWith("table")
).map(table => ({
label: table._instanceName,
value: table._id,
}))
$: tableBlocks = findAllMatchingComponents($currentAsset?.props, component =>
component._component.endsWith("tableblock")
).map(block => ({
label: block._instanceName,
value: `${block._id}-table`,
}))
$: componentOptions = tables.concat(tableBlocks)
$: columnOptions = getColumnOptions(parameters.tableComponentId)

const getColumnOptions = tableId => {
// Strip block suffix if block component
if (tableId?.includes("-")) {
tableId = tableId.split("-")[0]
}
const selectedTable = findComponent($currentAsset?.props, tableId)
const datasource = getDatasourceForProvider($currentAsset, selectedTable)
const { schema } = getSchemaForDatasource($currentAsset, datasource)
return Object.keys(schema || {})
}

onMount(() => {
if (!parameters.type) {
parameters.type = "csv"
@@ -53,10 +70,16 @@
<Select
bind:value={parameters.tableComponentId}
options={componentOptions}
on:change={() => (parameters.columns = [])}
/>

<Label small>Export as</Label>
<Select bind:value={parameters.type} options={FORMATS} />
<Label small>Export columns</Label>
<Multiselect
placeholder="All columns"
bind:value={parameters.columns}
options={columnOptions}
/>
</div>
</div>
@@ -80,7 +103,7 @@
display: grid;
column-gap: var(--spacing-xs);
row-gap: var(--spacing-s);
grid-template-columns: 70px 1fr;
grid-template-columns: 90px 1fr;
align-items: center;
}
</style>
@@ -270,6 +270,7 @@ const exportDataHandler = async action => {
tableId: selection.tableId,
rows: selection.selectedRows,
format: action.parameters.type,
columns: action.parameters.columns,
})
download(data, `${selection.tableId}.${action.parameters.type}`)
} catch (error) {
@@ -65,12 +65,15 @@ export const buildRowEndpoints = API => ({
* Exports rows.
* @param tableId the table ID to export the rows from
* @param rows the array of rows to export
* @param format the format to export (csv or json)
* @param columns which columns to export (all if undefined)
*/
exportRows: async ({ tableId, rows, format }) => {
exportRows: async ({ tableId, rows, format, columns }) => {
return await API.post({
url: `/api/${tableId}/rows/exportRows?format=${format}`,
body: {
rows,
columns,
},
parseResponse: async response => {
return await response.text()
Binary file not shown.
@@ -0,0 +1,23 @@
#!/bin/bash

# Must be root to continue
if [[ $(id -u) -ne 0 ]] ; then echo "Please run as root" ; exit 1 ; fi

# Allow for re-runs
rm -rf /opt/oracle

echo "Installing oracle instant client"

# copy and unzip package
mkdir -p /opt/oracle
cp scripts/integrations/oracle/instantclient/linux/arm64/basiclite-19.10.zip /opt/oracle
cd /opt/oracle
unzip -qq basiclite-19.10.zip -d .
rm *.zip
mv instantclient* instantclient

# update runtime link path
sh -c "echo /opt/oracle/instantclient > /etc/ld.so.conf.d/oracle-instantclient.conf"
ldconfig /etc/ld.so.conf.d

echo "Installation complete"
@@ -0,0 +1,10 @@
#!/bin/bash
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"
if [[ $TARGETARCH == arm* ]] ;
then
  echo "Installing ARM Oracle instant client..."
  $SCRIPT_DIR/arm64/install.sh
else
  echo "Installing x86-64 Oracle instant client..."
  $SCRIPT_DIR/x86-64/install.sh
fi
@@ -157,7 +157,8 @@ exports.validate = async () => {
exports.exportRows = async ctx => {
const { datasourceId } = breakExternalTableId(ctx.params.tableId)
const db = getAppDB()
let format = ctx.query.format
const format = ctx.query.format
const { columns } = ctx.request.body
const datasource = await db.get(datasourceId)
if (!datasource || !datasource.entities) {
ctx.throw(400, "Datasource has not been configured for plus API.")
@@ -171,13 +172,27 @@ exports.exportRows = async ctx => {
}

let result = await exports.search(ctx)
let headers = Object.keys(result.rows[0])
let rows = []

// Filter data to only specified columns if required
if (columns && columns.length) {
for (let i = 0; i < result.rows.length; i++) {
rows[i] = {}
for (let column of columns) {
rows[i][column] = result.rows[i][column]
}
}
} else {
rows = result.rows
}

let headers = Object.keys(rows[0])
const exporter = exporters[format]
const filename = `export.${format}`

// send down the file
ctx.attachment(filename)
return apiFileReturn(exporter(headers, result.rows))
return apiFileReturn(exporter(headers, rows))
}

exports.fetchEnrichedRow = async ctx => {
@@ -0,0 +1,4 @@
#!/bin/bash
sudo apt-get install -y qemu qemu-user-static
docker buildx create --name budibase
docker buildx use budibase
@@ -1,11 +1,16 @@
#!/bin/bash
dir=$(pwd)
mv dist /
mv package.json /
declare -a keep=("dist" "package.json" "yarn.lock" "client" "builder" "build" "pm2.config.js" "docker_run.sh")
for moveDir in "${keep[@]}"
do
  mv $moveDir / 2>/dev/null
done
cd /
rm -r $dir
mkdir $dir
mv /dist $dir
mv /package.json $dir
for keepDir in "${keep[@]}"
do
  mv /$keepDir $dir/ 2>/dev/null
done
cd $dir
NODE_ENV=production yarn
@@ -0,0 +1,8 @@
#!/bin/bash
if [[ $TARGETARCH == arm* ]] ;
then
  wget https://dl.min.io/server/minio/release/linux-arm64/minio
else
  wget https://dl.min.io/server/minio/release/linux-amd64/minio
fi
chmod +x minio