cherry pick nginx change (#9316)

Co-authored-by: Rory Powell <rory.codes@gmail.com>
Martin McKeaveney 2023-01-11 21:05:44 +00:00 committed by GitHub
parent 0c47054eab
commit 78e67df3dc
16 changed files with 81 additions and 218 deletions


@@ -38,17 +38,6 @@ jobs:
           fi
           echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
-      - name: Tag and release Proxy service docker image
-        run: |
-          docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-          yarn build:docker:proxy:prod
-          docker tag proxy-service budibase/proxy:$PROD_TAG
-          docker push budibase/proxy:$PROD_TAG
-        env:
-          DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
-          DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
-          PROD_TAG: k8s
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@v1
         with:
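
These per-environment proxy build steps disappear from all of the deploy workflows below because the proxy image no longer bakes upstream addresses into its nginx config; they are injected at runtime through environment variables (see the Dockerfile and nginx.prod.conf hunks further down). A rough sketch of the equivalent one-off build, assuming the version-tagged push now happens alongside the other service images in the shared release job:

  # build the generic proxy image (what the package.json build:docker:proxy script runs)
  yarn build:docker:proxy                                   # docker build hosting/proxy -t proxy-service
  # assumed: one tag per release version instead of k8s / k8s-preprod / k8s-release
  docker tag proxy-service budibase/proxy:$RELEASE_VERSION
  docker push budibase/proxy:$RELEASE_VERSION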


@@ -28,17 +28,6 @@ jobs:
           release_version=$(cat lerna.json | jq -r '.version')
           echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
-      - name: Tag and release Proxy service docker image
-        run: |
-          docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-          yarn build:docker:proxy:preprod
-          docker tag proxy-service budibase/proxy:$PREPROD_TAG
-          docker push budibase/proxy:$PREPROD_TAG
-        env:
-          DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
-          DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
-          PREPROD_TAG: k8s-preprod
       - name: Pull values.yaml from budibase-infra
         run: |
           curl -H "Authorization: token ${{ secrets.GH_PERSONAL_TOKEN }}" \


@@ -29,17 +29,6 @@ jobs:
           release_version=$(cat lerna.json | jq -r '.version')
           echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
-      - name: Tag and release Proxy service docker image
-        run: |
-          docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-          yarn build:docker:proxy:release
-          docker tag proxy-service budibase/proxy:$RELEASE_TAG
-          docker push budibase/proxy:$RELEASE_TAG
-        env:
-          DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
-          DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
-          RELEASE_TAG: k8s-release
       - name: Pull values.yaml from budibase-infra
         run: |
           curl -H "Authorization: token ${{ secrets.GH_PERSONAL_TOKEN }}" \


@@ -76,22 +76,25 @@ jobs:
           DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
           DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
-      - name: Get the latest budibase release version
+  deploy-to-release-env:
+    needs: [release-images]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Get the current budibase release version
         id: version
         run: |
           release_version=$(cat lerna.json | jq -r '.version')
           echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV
-      - name: Tag and release Proxy service docker image
-        run: |
-          docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-          yarn build:docker:proxy:release
-          docker tag proxy-service budibase/proxy:$RELEASE_TAG
-          docker push budibase/proxy:$RELEASE_TAG
-        env:
-          DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
-          DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
-          RELEASE_TAG: k8s-release
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: eu-west-1
       - name: Pull values.yaml from budibase-infra
         run: |


@@ -98,17 +98,6 @@ jobs:
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           aws-region: eu-west-1
-      - name: Tag and release Proxy service docker image
-        run: |
-          docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-          yarn build:docker:proxy:preprod
-          docker tag proxy-service budibase/proxy:$PREPROD_TAG
-          docker push budibase/proxy:$PREPROD_TAG
-        env:
-          DOCKER_USER: ${{ secrets.DOCKER_USERNAME }}
-          DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }}
-          PREPROD_TAG: k8s-preprod
       - name: Pull values.yaml from budibase-infra
         run: |
           curl -H "Authorization: token ${{ secrets.GH_PERSONAL_TOKEN }}" \

.gitignore

@@ -66,8 +66,6 @@ typings/
 .env
 !qa-core/.env
 !hosting/.env
-hosting/.generated-nginx.dev.conf
-hosting/proxy/.generated-nginx.prod.conf

 # parcel-bundler cache (https://parceljs.org/)
 .cache
@@ -105,5 +103,7 @@ stats.html
 # TypeScript cache
 *.tsbuildinfo

+# plugins
 budibase-component
 budibase-datasource


@@ -28,11 +28,26 @@ spec:
         app.kubernetes.io/name: budibase-proxy
     spec:
       containers:
-      - image: budibase/proxy:{{ .Values.services.proxy.tag | default "k8s" }}
+      - image: budibase/proxy:{{ .Values.globals.appVersion }}
         imagePullPolicy: Always
         name: proxy-service
         ports:
         - containerPort: {{ .Values.services.proxy.port }}
+        env:
+        - name: APPS_UPSTREAM_URL
+          value: {{ tpl .Values.services.proxy.upstreams.apps . | quote }}
+        - name: WORKER_UPSTREAM_URL
+          value: {{ tpl .Values.services.proxy.upstreams.worker . | quote }}
+        - name: MINIO_UPSTREAM_URL
+          value: {{ tpl .Values.services.proxy.upstreams.minio . | quote }}
+        - name: COUCHDB_UPSTREAM_URL
+          value: {{ .Values.services.couchdb.url | default (tpl .Values.services.proxy.upstreams.couchdb .) | quote }}
+        - name: RESOLVER
+        {{ if .Values.services.proxy.resolver }}
+          value: {{ .Values.services.proxy.resolver }}
+        {{ else }}
+          value: kube-dns.kube-system.svc.{{ .Values.services.dns }}
+        {{ end }}
         {{ with .Values.services.proxy.resources }}
         resources:
           {{- toYaml . | nindent 10 }}
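
The deployment now feeds the proxy its upstream URLs and DNS resolver through container env vars, rendering the values with tpl so they can reference .Release.Namespace and other chart values. A quick sketch for inspecting the rendered env block locally, assuming the chart lives at charts/budibase with its dependencies already built, and using an example appVersion:

  # render the chart and show the injected upstream/resolver env vars
  helm template budibase charts/budibase --set globals.appVersion=2.2.12 \
    | grep -E -A 1 'UPSTREAM_URL|RESOLVER'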


@@ -124,6 +124,11 @@ services:
   proxy:
     port: 10000
     replicaCount: 1
+    upstreams:
+      apps: 'http://app-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.apps.port }}'
+      worker: 'http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}'
+      minio: 'http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}'
+      couchdb: 'http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}'
     resources: {}

   apps:
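
Because these defaults are templated strings (hence the tpl calls in the deployment above), any of them can be overridden per install, and services.couchdb.url or services.proxy.resolver still take precedence where the template checks for them. A hedged example, with the release name, namespace and override values chosen purely for illustration:

  helm upgrade --install budibase charts/budibase -n budibase \
    --set services.couchdb.url=http://external-couchdb:5984 \
    --set services.proxy.resolver=10.96.0.10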


@@ -27,7 +27,7 @@ services:
     restart: on-failure
     image: nginx:latest
     volumes:
-      - ./.generated-nginx.dev.conf:/etc/nginx/nginx.conf
+      - ./nginx.dev.conf:/etc/nginx/nginx.conf
       - ./proxy/error.html:/usr/share/nginx/html/error.html
     ports:
       - "${MAIN_PORT}:10000"
@@ -36,6 +36,8 @@ services:
       - couchdb-service
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    environment:
+      - PROXY_ADDRESS=${PROXY_ADDRESS}

   couchdb-service:
     # platform: linux/amd64
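
Instead of a pre-generated config, the dev proxy now receives the host address through PROXY_ADDRESS, which the hosting script further down writes into .env (172.17.0.1 on Linux, host.docker.internal elsewhere). A hedged way to confirm the value is flowing through compose before bringing the stack up, assuming the file is hosting/docker-compose.dev.yaml:

  cd hosting
  PROXY_ADDRESS=host.docker.internal docker compose -f docker-compose.dev.yaml config | grep PROXY_ADDRESS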


@@ -82,6 +82,12 @@ services:
     environment:
       - PROXY_RATE_LIMIT_WEBHOOKS_PER_SECOND=10
       - PROXY_RATE_LIMIT_API_PER_SECOND=20
+      - APPS_UPSTREAM_URL=http://app-service:4002
+      - WORKER_UPSTREAM_URL=http://worker-service:4003
+      - MINIO_UPSTREAM_URL=http://minio-service:9000
+      - COUCHDB_UPSTREAM_URL=http://couchdb-service:5984
+      - WATCHTOWER_UPSTREAM_URL=http://watchtower-service:8080
+      - RESOLVER=127.0.0.11
     depends_on:
       - minio-service
       - worker-service
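
With the self-host compose file the same defaults are passed explicitly, so the running proxy should show them both in its environment and in the nginx config that envsubst renders to /etc/nginx/nginx.conf. A hedged check, assuming the service is named proxy-service in this file:

  docker compose exec proxy-service env | grep -E 'UPSTREAM_URL|RESOLVER'
  docker compose exec proxy-service grep -E 'set \$(apps|worker|minio|couchdb)' /etc/nginx/nginx.conf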


@@ -25,17 +25,17 @@ http {
  }

  upstream app-service {
-    server {{address}}:4001;
+    server ${PROXY_ADDRESS}:4001;
    keepalive 32;
  }

  upstream worker-service {
-    server {{address}}:4002;
+    server ${PROXY_ADDRESS}:4002;
    keepalive 32;
  }

  upstream builder {
-    server {{address}}:3000;
+    server ${PROXY_ADDRESS}:3000;
    keepalive 32;
  }
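
The handlebars {{address}} placeholder becomes a plain ${PROXY_ADDRESS} reference, so no generation step is needed any more. To preview what the substituted dev config would look like, gettext's envsubst can be run by hand from the hosting directory (restricting it to the one variable so nginx's own $variables are left untouched):

  PROXY_ADDRESS=host.docker.internal envsubst '${PROXY_ADDRESS}' < nginx.dev.conf | grep 'server '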


@@ -4,7 +4,7 @@ FROM nginx:latest
 # use the default nginx behaviour for *.template files which are processed with envsubst
 # override the output dir to output directly to /etc/nginx instead of /etc/nginx/conf.d
 ENV NGINX_ENVSUBST_OUTPUT_DIR=/etc/nginx
-COPY .generated-nginx.prod.conf /etc/nginx/templates/nginx.conf.template
+COPY nginx.prod.conf /etc/nginx/templates/nginx.conf.template

 # IPv6 removal needs to happen after envsubst
 RUN rm -rf /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
@@ -17,3 +17,10 @@ COPY error.html /usr/share/nginx/html/error.html
 # Default environment
 ENV PROXY_RATE_LIMIT_WEBHOOKS_PER_SECOND=10
 ENV PROXY_RATE_LIMIT_API_PER_SECOND=20
+# Use docker-compose values as defaults for backwards compatibility
+ENV APPS_UPSTREAM_URL=http://app-service:4002
+ENV WORKER_UPSTREAM_URL=http://worker-service:4003
+ENV MINIO_UPSTREAM_URL=http://minio-service:9000
+ENV COUCHDB_UPSTREAM_URL=http://couchdb-service:5984
+ENV WATCHTOWER_UPSTREAM_URL=http://watchtower-service:8080
+ENV RESOLVER=127.0.0.11
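
The baked-in defaults mirror the docker-compose service names, so an image built from this Dockerfile keeps working with no extra wiring, while Kubernetes (or any other environment) overrides the upstreams at runtime. A hedged local check that the nginx image's template entrypoint picks up an override (it should render nginx.conf.template into /etc/nginx/nginx.conf before nginx starts):

  docker build hosting/proxy -t proxy-service
  docker run --rm -e COUCHDB_UPSTREAM_URL=http://couchdb0:5984 proxy-service nginx -T | grep couchdb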


@@ -23,7 +23,7 @@ http {
   tcp_nodelay on;
   server_tokens off;
   types_hash_max_size 2048;
-  resolver {{ resolver }} valid=10s ipv6=off;
+  resolver ${RESOLVER} valid=10s ipv6=off;

   # buffering
   client_header_buffer_size 1k;
@@ -76,27 +76,23 @@ http {
     add_header Content-Security-Policy "${csp_default}; ${csp_script}; ${csp_style}; ${csp_object}; ${csp_base_uri}; ${csp_connect}; ${csp_font}; ${csp_frame}; ${csp_img}; ${csp_manifest}; ${csp_media}; ${csp_worker};" always;

     # upstreams
-    set $apps {{ apps }};
-    set $worker {{ worker }};
-    set $minio {{ minio }};
-    set $couchdb {{ couchdb }};
-    {{#if watchtower}}
-    set $watchtower {{ watchtower }};
-    {{/if}}
+    set $apps ${APPS_UPSTREAM_URL};
+    set $worker ${WORKER_UPSTREAM_URL};
+    set $minio ${MINIO_UPSTREAM_URL};
+    set $couchdb ${COUCHDB_UPSTREAM_URL};
+    set $watchtower ${WATCHTOWER_UPSTREAM_URL};

     location /app {
-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

     location = / {
-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

-    {{#if watchtower}}
     location = /v1/update {
-      proxy_pass http://$watchtower:8080;
+      proxy_pass $watchtower;
     }
-    {{/if}}

     location ~ ^/(builder|app_) {
       proxy_http_version 1.1;
@@ -107,19 +103,17 @@ http {
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
       proxy_set_header Host $host;

-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

     location ~ ^/api/(system|admin|global)/ {
       proxy_set_header Host $host;
-
-      proxy_pass http://$worker:4003;
+      proxy_pass $worker;
     }

     location /worker/ {
       proxy_set_header Host $host;
-
-      proxy_pass http://$worker:4003;
+      proxy_pass $worker;
       rewrite ^/worker/(.*)$ /$1 break;
     }

@@ -138,7 +132,7 @@ http {
       proxy_set_header X-Real-IP $remote_addr;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

     location /api/ {
@@ -157,7 +151,7 @@ http {
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
       proxy_set_header Host $host;

-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

     location /api/webhooks/ {
@@ -177,11 +171,11 @@ http {
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
       proxy_set_header Host $host;

-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

     location /db/ {
-      proxy_pass http://$couchdb:5984;
+      proxy_pass $couchdb;
       rewrite ^/db/(.*)$ /$1 break;
     }

@@ -191,7 +185,7 @@ http {
       proxy_set_header Connection 'upgrade';
       proxy_set_header Host $host;
       proxy_cache_bypass $http_upgrade;
-      proxy_pass http://$apps:4002;
+      proxy_pass $apps;
     }

     location / {
@@ -205,7 +199,7 @@ http {
       proxy_set_header Connection "";
       chunked_transfer_encoding off;

-      proxy_pass http://$minio:9000;
+      proxy_pass $minio;
     }

     client_header_timeout 60;
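
Pointing proxy_pass at variables means nginx resolves the upstream hostnames per request via the resolver directive, rather than once at startup, so the proxy can start even while other services are still coming up. A rough smoke test once a stack is running, assuming MAIN_PORT=10000 and using paths that only illustrate the routed location blocks above:

  # expect an HTTP status code from each routed service rather than a connection error
  for path in / /api/system/environment /db/; do
    curl -s -o /dev/null -w "%{http_code}  $path\n" "http://localhost:10000$path"
  done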


@@ -55,15 +55,11 @@
     "test:e2e:ci:record": "lerna run cy:ci:record --stream",
     "test:e2e:ci:notify": "lerna run cy:ci:notify",
     "build:specs": "lerna run specs",
-    "build:docker": "lerna run build:docker && npm run build:docker:proxy:compose && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh $BUDIBASE_RELEASE_VERSION && cd -",
+    "build:docker": "lerna run build:docker && npm run build:docker:proxy && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh $BUDIBASE_RELEASE_VERSION && cd -",
     "build:docker:pre": "lerna run build && lerna run predocker",
     "build:docker:proxy": "docker build hosting/proxy -t proxy-service",
-    "build:docker:proxy:compose": "node scripts/proxy/generateProxyConfig compose && npm run build:docker:proxy",
-    "build:docker:proxy:preprod": "node scripts/proxy/generateProxyConfig preprod && npm run build:docker:proxy",
-    "build:docker:proxy:release": "node scripts/proxy/generateProxyConfig release && npm run build:docker:proxy",
-    "build:docker:proxy:prod": "node scripts/proxy/generateProxyConfig prod && npm run build:docker:proxy",
     "build:docker:selfhost": "lerna run build:docker && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh latest && cd -",
-    "build:docker:develop": "node scripts/pinVersions && lerna run build:docker && npm run build:docker:proxy:compose && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh develop && cd -",
+    "build:docker:develop": "node scripts/pinVersions && lerna run build:docker && npm run build:docker:proxy && cd hosting/scripts/linux/ && ./release-to-docker-hub.sh develop && cd -",
     "build:docker:airgap": "node hosting/scripts/airgapped/airgappedDockerBuild",
     "build:digitalocean": "cd hosting/digitalocean && ./build.sh && cd -",
     "build:docker:single:multiarch": "docker buildx build --platform linux/arm64,linux/amd64 -f hosting/single/Dockerfile -t budibase:latest .",


@@ -3,7 +3,6 @@ const compose = require("docker-compose")
 const path = require("path")
 const fs = require("fs")
 const isWsl = require("is-wsl")
-const { processStringSync } = require("@budibase/string-templates")

 function isLinux() {
   return !isWsl && process.platform !== "darwin" && process.platform !== "win32"
@@ -23,16 +22,6 @@ const Commands = {
 }

 async function init() {
-  // generate nginx file, always do this incase it has changed
-  const hostingPath = path.join(process.cwd(), "..", "..", "hosting")
-  const nginxHbsPath = path.join(hostingPath, "nginx.dev.conf.hbs")
-  const nginxOutputPath = path.join(hostingPath, ".generated-nginx.dev.conf")
-  const contents = fs.readFileSync(nginxHbsPath, "utf8")
-  const config = {
-    address: isLinux() ? "172.17.0.1" : "host.docker.internal",
-  }
-  fs.writeFileSync(nginxOutputPath, processStringSync(contents, config))
-
   const envFilePath = path.join(process.cwd(), ".env")
   if (!fs.existsSync(envFilePath)) {
     const envFileJson = {
@@ -60,6 +49,7 @@ async function init() {
       BB_ADMIN_USER_PASSWORD: "",
       PLUGINS_DIR: "",
       TENANT_FEATURE_FLAGS: "*:LICENSING,*:USER_GROUPS",
+      PROXY_ADDRESS: isLinux() ? "172.17.0.1" : "host.docker.internal",
     }
     let envFile = ""
     Object.keys(envFileJson).forEach(key => {
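
The dev script no longer renders a handlebars template; it only records the address containers should use to reach services running on the host. On Linux that is the default Docker bridge gateway (usually 172.17.0.1), while Docker Desktop exposes host.docker.internal via the extra_hosts entry in the dev compose file. A hedged way to confirm the gateway on a Linux host:

  docker network inspect bridge -f '{{ range .IPAM.Config }}{{ .Gateway }}{{ end }}'   # typically 172.17.0.1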

scripts/proxy/generateProxyConfig (deleted)

@@ -1,111 +0,0 @@
#!/usr/bin/env node
const path = require("path")
const fs = require("fs")
function processStringSync(string, env) {
let output = ""
// process if statements
let removal = false
for (let line of string.split("\n")) {
if (new RegExp(`{{\/if}}`, "g").test(line)) {
removal = false
continue
}
if (!removal) {
const match = line.match(new RegExp(`{{#if (.*)}}`))
if (match) {
const key = match[1]
// check the if statement is true
if (!env[key]) {
removal = true
}
continue
}
output += line + "\n"
}
}
for (let key in env) {
// replace variables
const rgx = new RegExp(`{{\\s*${key}\\s*}}`, "g")
output = output.replace(rgx, env[key])
}
return output
}
const Configs = {
prod: {
apps: "app-service.budibase.svc.cluster.local",
worker: "worker-service.budibase.svc.cluster.local",
minio: "minio-service.budibase.svc.cluster.local",
couchdb: "budibase-prod-svc-couchdb",
resolver: "kube-dns.kube-system.svc.cluster.local"
},
preprod: {
apps: "app-service.budibase.svc.cluster.local",
worker: "worker-service.budibase.svc.cluster.local",
minio: "minio-service.budibase.svc.cluster.local",
couchdb: "budibase-preprod-svc-couchdb",
resolver: "kube-dns.kube-system.svc.cluster.local"
},
release: {
apps: "app-service.budibase.svc.cluster.local",
worker: "worker-service.budibase.svc.cluster.local",
minio: "minio-service.budibase.svc.cluster.local",
couchdb: "budibase-release-svc-couchdb",
resolver: "kube-dns.kube-system.svc.cluster.local"
},
compose: {
apps: "app-service",
worker: "worker-service",
minio: "minio-service",
couchdb: "couchdb-service",
watchtower: "watchtower-service",
resolver: "127.0.0.11"
},
}
const Commands = {
Prod: "prod",
Preprod: "preprod",
Release: "release",
Compose: "compose",
}
async function init(managementCommand) {
const config = Configs[managementCommand]
const hostingPath = path.join(process.cwd(), "hosting")
const nginxHbsPath = path.join(hostingPath, "nginx.prod.conf.hbs")
const nginxOutputPath = path.join(
hostingPath,
"proxy",
".generated-nginx.prod.conf"
)
const contents = fs.readFileSync(nginxHbsPath, "utf8")
fs.writeFileSync(nginxOutputPath, processStringSync(contents, config))
}
const managementCommand = process.argv.slice(2)[0]
if (
!managementCommand ||
!Object.values(Commands).some(command => managementCommand === command)
) {
throw new Error(
"You must supply either a 'compose', 'preprod' or 'prod' commmand to generate an NGINX config."
)
}
init(managementCommand)
.then(() => {
console.log("Done! 🎉")
})
.catch(err => {
console.error(
"Something went wrong while creating the nginx configuration",
err.message
)
})