Expanding on deployment: hiding the quota handling behind layers that are easier to abstract, and keeping any AWS-specific features out of the deployment index.
parent ac6b2aa6c8
commit 103161c7a8
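For orientation before the diff: every deploy target now exposes the same three hooks, and the deployment index picks a provider by environment, so nothing AWS-specific leaks into the shared flow. A minimal sketch using the names introduced in this commit:

const env = require("../../../environment")

// index.js chooses the provider once, at require time
const deployment = env.SELF_HOSTED
  ? require("./selfDeploy") // no-op stubs for self-hosted installs
  : require("./awsDeploy") // AWS specifics: quota checks, S3 upload, CloudFront
const { deploy, preDeployment, postDeployment } = deployment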
@@ -0,0 +1,66 @@
+const { getAppQuota } = require("./quota")
+const env = require("../../../environment")
+
+/**
+ * This is used to pass around information about the deployment that is occurring
+ */
+class Deployment {
+  constructor(id, appId) {
+    this._id = id
+    this.appId = appId
+  }
+
+  // purely so that we can do quota stuff outside the main deployment context
+  async init() {
+    if (!env.SELF_HOSTED) {
+      this.setQuota(await getAppQuota(this.appId))
+    }
+  }
+
+  setQuota(quota) {
+    this.quota = quota
+  }
+
+  getQuota() {
+    return this.quota
+  }
+
+  getAppId() {
+    return this.appId
+  }
+
+  setVerification(verification) {
+    this.verification = verification
+  }
+
+  getVerification() {
+    return this.verification
+  }
+
+  setStatus(status, err = null) {
+    this.status = status
+    if (err) {
+      this.err = err
+    }
+  }
+
+  getJSON() {
+    const obj = {
+      _id: this._id,
+      appId: this.appId,
+      status: this.status,
+    }
+    if (this.err) {
+      obj.err = this.err
+    }
+    if (this.verification && this.verification.cfDistribution) {
+      obj.cfDistribution = this.verification.cfDistribution
+    }
+    if (this.verification && this.verification.quota) {
+      obj.quota = this.verification.quota
+    }
+    return obj
+  }
+}
+
+module.exports = Deployment
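To make the layering concrete, a sketch of how a Deployment instance is driven through the provider hooks; the call sequence mirrors deployApp in the index changes below, and assumes the hooks and DeploymentStatus are in scope:

const deployment = new Deployment(deploymentId, appId)
await deployment.init() // fetches the app quota unless SELF_HOSTED
deployment.setVerification(await preDeployment(deployment))
await deploy(deployment)
await postDeployment(deployment)
deployment.setStatus(DeploymentStatus.SUCCESS)
await storeLocalDeploymentHistory(deployment) // persists deployment.getJSON()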
@@ -1,57 +1,26 @@
 const fs = require("fs")
 const { join } = require("../../../utilities/centralPath")
-const AWS = require("aws-sdk")
+const AwsDeploy = require("aws-sdk")
 const fetch = require("node-fetch")
-const sanitize = require("sanitize-s3-objectkey")
 const { budibaseAppsDir } = require("../../../utilities/budibaseDir")
-const PouchDB = require("../../../db")
 const env = require("../../../environment")
-
-/**
- * Finalises the deployment, updating the quota for the user API key
- * The verification process returns the levels to update to.
- * Calls the "deployment-success" lambda.
- * @param {object} quota The usage quota levels returned from the verifyDeploy
- * @returns {Promise<object>} The usage has been updated against the user API key.
- */
-exports.updateDeploymentQuota = async function(quota) {
-  const DEPLOYMENT_SUCCESS_URL =
-    env.DEPLOYMENT_CREDENTIALS_URL + "deploy/success"
-
-  const response = await fetch(DEPLOYMENT_SUCCESS_URL, {
-    method: "POST",
-    body: JSON.stringify({
-      apiKey: env.BUDIBASE_API_KEY,
-      quota,
-    }),
-    headers: {
-      "Content-Type": "application/json",
-      Accept: "application/json",
-    },
-  })
-
-  if (response.status !== 200) {
-    throw new Error(`Error updating deployment quota for API Key`)
-  }
-
-  return await response.json()
-}
+const { prepareUpload } = require("./utils")
+const { walkDir } = require("../../../utilities")

 /**
  * Verifies the user's API key and
  * Verifies that the deployment fits within the quota of the user
  * Links to the "check-api-key" lambda.
- * @param {String} appId - appId being deployed
- * @param {quota} quota - current quota being changed with this application
+ * @param {object} deployment - information about the active deployment, including the appId and quota.
  */
-exports.verifyDeployment = async function({ appId, quota }) {
+exports.preDeployment = async function(deployment) {
   const response = await fetch(env.DEPLOYMENT_CREDENTIALS_URL, {
     method: "POST",
     body: JSON.stringify({
       apiKey: env.BUDIBASE_API_KEY,
-      appId,
-      quota,
+      appId: deployment.getAppId(),
+      quota: deployment.getQuota(),
     }),
   })
@@ -68,7 +37,7 @@ exports.verifyDeployment = async function({ appId, quota }) {

   // set credentials here, means any time we're verified we're ready to go
   if (json.credentials) {
-    AWS.config.update({
+    AwsDeploy.config.update({
       accessKeyId: json.credentials.AccessKeyId,
       secretAccessKey: json.credentials.SecretAccessKey,
       sessionToken: json.credentials.SessionToken,
@@ -78,57 +47,40 @@ exports.verifyDeployment = async function({ appId, quota }) {
   return json
 }

-const CONTENT_TYPE_MAP = {
-  html: "text/html",
-  css: "text/css",
-  js: "application/javascript",
-}
-
 /**
- * Recursively walk a directory tree and execute a callback on all files.
- * @param {String} dirPath - Directory to traverse
- * @param {Function} callback - callback to execute on files
+ * Finalises the deployment, updating the quota for the user API key
+ * The verification process returns the levels to update to.
+ * Calls the "deployment-success" lambda.
+ * @param {object} deployment information about the active deployment, including the quota info.
+ * @returns {Promise<object>} The usage has been updated against the user API key.
  */
-function walkDir(dirPath, callback) {
-  for (let filename of fs.readdirSync(dirPath)) {
-    const filePath = `${dirPath}/${filename}`
-    const stat = fs.lstatSync(filePath)
-
-    if (stat.isFile()) {
-      callback(filePath)
-    } else {
-      walkDir(filePath, callback)
-    }
-  }
-}
-
-async function prepareUploadForS3({ s3Key, metadata, s3, file }) {
-  const extension = [...file.name.split(".")].pop()
-  const fileBytes = fs.readFileSync(file.path)
-
-  const upload = await s3
-    .upload({
-      // windows filepaths need to be converted to forward slashes for s3
-      Key: sanitize(s3Key).replace(/\\/g, "/"),
-      Body: fileBytes,
-      ContentType: file.type || CONTENT_TYPE_MAP[extension.toLowerCase()],
-      Metadata: metadata,
-    })
-    .promise()
-
-  return {
-    size: file.size,
-    name: file.name,
-    extension,
-    url: upload.Location,
-    key: upload.Key,
-  }
-}
-
-exports.prepareUploadForS3 = prepareUploadForS3
-
-exports.uploadAppAssets = async function({ appId, bucket, accountId }) {
-  const s3 = new AWS.S3({
+exports.postDeployment = async function(deployment) {
+  const DEPLOYMENT_SUCCESS_URL =
+    env.DEPLOYMENT_CREDENTIALS_URL + "deploy/success"
+
+  const response = await fetch(DEPLOYMENT_SUCCESS_URL, {
+    method: "POST",
+    body: JSON.stringify({
+      apiKey: env.BUDIBASE_API_KEY,
+      quota: deployment.getQuota(),
+    }),
+    headers: {
+      "Content-Type": "application/json",
+      Accept: "application/json",
+    },
+  })
+
+  if (response.status !== 200) {
+    throw new Error(`Error updating deployment quota for API Key`)
+  }
+
+  return await response.json()
+}
+
+exports.deploy = async function(deployment) {
+  const appId = deployment.getAppId()
+  const { bucket, accountId } = deployment.getVerification()
+  const s3 = new AwsDeploy.S3({
     params: {
       Bucket: bucket,
     },
@@ -143,7 +95,7 @@ exports.uploadAppAssets = async function({ appId, bucket, accountId }) {
   for (let page of appPages) {
     // Upload HTML, CSS and JS for each page of the web app
     walkDir(join(appAssetsPath, page), function(filePath) {
-      const appAssetUpload = prepareUploadForS3({
+      const appAssetUpload = prepareUpload({
        file: {
          path: filePath,
          name: [...filePath.split("/")].pop(),
@@ -168,7 +120,7 @@ exports.uploadAppAssets = async function({ appId, bucket, accountId }) {
   for (let file of fileUploads.uploads) {
     if (file.uploaded) continue

-    const attachmentUpload = prepareUploadForS3({
+    const attachmentUpload = prepareUpload({
       file,
       s3Key: `assets/${appId}/attachments/${file.processedFileName}`,
       s3,
@@ -1,13 +1,12 @@
 const CouchDB = require("pouchdb")
 const PouchDB = require("../../../db")
-const {
-  uploadAppAssets,
-  verifyDeployment,
-  updateDeploymentQuota,
-} = require("./aws")
-const { DocumentTypes, SEPARATOR, UNICODE_MAX } = require("../../../db/utils")
 const newid = require("../../../db/newid")
 const env = require("../../../environment")
+const deployment = env.SELF_HOSTED
+  ? require("./selfDeploy")
+  : require("./awsDeploy")
+const { deploy, preDeployment, postDeployment } = deployment
+const Deployment = require("./Deployment")

 // the max time we can wait for an invalidation to complete before considering it failed
 const MAX_PENDING_TIME_MS = 30 * 60000
@@ -44,7 +43,9 @@ function replicate(local, remote) {
   })
 }

-async function replicateCouch({ appId, session }) {
+async function replicateCouch(deployment) {
+  const appId = deployment.getAppId()
+  const { session } = deployment.getVerification()
   const localDb = new PouchDB(appId)
   const remoteDb = new CouchDB(`${env.DEPLOYMENT_DB_URL}/${appId}`, {
     fetch: function(url, opts) {
@@ -56,33 +57,10 @@ async function replicateCouch({ appId, session }) {
   return replicate(localDb, remoteDb)
 }

-async function getCurrentInstanceQuota(appId) {
-  const db = new PouchDB(appId)
-
-  const rows = await db.allDocs({
-    startkey: DocumentTypes.ROW + SEPARATOR,
-    endkey: DocumentTypes.ROW + SEPARATOR + UNICODE_MAX,
-  })
-
-  const users = await db.allDocs({
-    startkey: DocumentTypes.USER + SEPARATOR,
-    endkey: DocumentTypes.USER + SEPARATOR + UNICODE_MAX,
-  })
-
-  const existingRows = rows.rows.length
-  const existingUsers = users.rows.length
-
-  const designDoc = await db.get("_design/database")
-
-  return {
-    rows: existingRows,
-    users: existingUsers,
-    views: Object.keys(designDoc.views).length,
-  }
-}
-
 async function storeLocalDeploymentHistory(deployment) {
-  const db = new PouchDB(deployment.appId)
+  const appId = deployment.getAppId()
+  const deploymentJSON = deployment.getJSON()
+  const db = new PouchDB(appId)

   let deploymentDoc
   try {
@@ -91,7 +69,7 @@ async function storeLocalDeploymentHistory(deployment) {
     deploymentDoc = { _id: "_local/deployments", history: {} }
   }

-  const deploymentId = deployment._id || newid()
+  const deploymentId = deploymentJSON._id || newid()

   // first time deployment
   if (!deploymentDoc.history[deploymentId])
@@ -99,7 +77,7 @@ async function storeLocalDeploymentHistory(deployment) {

   deploymentDoc.history[deploymentId] = {
     ...deploymentDoc.history[deploymentId],
-    ...deployment,
+    ...deploymentJSON,
     updatedAt: Date.now(),
   }
@@ -111,43 +89,26 @@ async function storeLocalDeploymentHistory(deployment) {
 }

 async function deployApp({ appId, deploymentId }) {
+  const deployment = new Deployment(deploymentId, appId)
   try {
-    const instanceQuota = await getCurrentInstanceQuota(appId)
-    const verification = await verifyDeployment({
-      appId,
-      quota: instanceQuota,
-    })
+    await deployment.init()
+    deployment.setVerification(await preDeployment(deployment))

-    console.log(`Uploading assets for appID ${appId} assets to s3..`)
+    console.log(`Uploading assets for appID ${appId}..`)

-    await uploadAppAssets({
-      appId,
-      ...verification,
-    })
+    await deploy(deployment)

     // replicate the DB to the couchDB cluster in prod
     console.log("Replicating local PouchDB to remote..")
-    await replicateCouch({
-      appId,
-      session: verification.couchDbSession,
-    })
+    await replicateCouch(deployment)

-    await updateDeploymentQuota(verification.quota)
+    await postDeployment(deployment)

-    await storeLocalDeploymentHistory({
-      _id: deploymentId,
-      appId,
-      cfDistribution: verification.cfDistribution,
-      quota: verification.quota,
-      status: DeploymentStatus.SUCCESS,
-    })
+    deployment.setStatus(DeploymentStatus.SUCCESS)
+    await storeLocalDeploymentHistory(deployment)
   } catch (err) {
-    await storeLocalDeploymentHistory({
-      _id: deploymentId,
-      appId,
-      status: DeploymentStatus.FAILURE,
-      err: err.message,
-    })
+    deployment.setStatus(DeploymentStatus.FAILURE, err.message)
+    await storeLocalDeploymentHistory(deployment)
     throw new Error(`Deployment Failed: ${err.message}`)
   }
 }
@@ -188,7 +149,7 @@ exports.deployApp = async function(ctx) {
     status: DeploymentStatus.PENDING,
   })

-  deployApp({
+  await deployApp({
     ...ctx.user,
     deploymentId: deployment._id,
   })
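Note the behavioural change above: deployApp is now awaited, so a failed deployment rejects inside the request handler instead of running fire-and-forget.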
@@ -0,0 +1,27 @@
+const PouchDB = require("../../../db")
+const { DocumentTypes, SEPARATOR, UNICODE_MAX } = require("../../../db/utils")
+
+exports.getAppQuota = async function(appId) {
+  const db = new PouchDB(appId)
+
+  const rows = await db.allDocs({
+    startkey: DocumentTypes.ROW + SEPARATOR,
+    endkey: DocumentTypes.ROW + SEPARATOR + UNICODE_MAX,
+  })
+
+  const users = await db.allDocs({
+    startkey: DocumentTypes.USER + SEPARATOR,
+    endkey: DocumentTypes.USER + SEPARATOR + UNICODE_MAX,
+  })
+
+  const existingRows = rows.rows.length
+  const existingUsers = users.rows.length
+
+  const designDoc = await db.get("_design/database")
+
+  return {
+    rows: existingRows,
+    users: existingUsers,
+    views: Object.keys(designDoc.views).length,
+  }
+}
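For illustration, getAppQuota resolves to the app's current usage counts; the numbers here are hypothetical:

// inside some async context
const quota = await getAppQuota(appId) // e.g. { rows: 120, users: 5, views: 3 }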
@@ -0,0 +1,5 @@
+exports.preDeployment = async function(deployment) {}
+
+exports.postDeployment = async function(deployment) {}
+
+exports.deploy = async function(deployment) {}
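These stubs let index.js destructure the same three hooks from either provider; for self-hosted installs they are deliberate no-ops, presumably to be filled in as self-hosted deployment is built out.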
@@ -0,0 +1,31 @@
+const fs = require("fs")
+const sanitize = require("sanitize-s3-objectkey")
+
+const CONTENT_TYPE_MAP = {
+  html: "text/html",
+  css: "text/css",
+  js: "application/javascript",
+}
+
+exports.prepareUpload = async function({ s3Key, metadata, s3, file }) {
+  const extension = [...file.name.split(".")].pop()
+  const fileBytes = fs.readFileSync(file.path)
+
+  const upload = await s3
+    .upload({
+      // windows file paths need to be converted to forward slashes for s3
+      Key: sanitize(s3Key).replace(/\\/g, "/"),
+      Body: fileBytes,
+      ContentType: file.type || CONTENT_TYPE_MAP[extension.toLowerCase()],
+      Metadata: metadata,
+    })
+    .promise()
+
+  return {
+    size: file.size,
+    name: file.name,
+    extension,
+    url: upload.Location,
+    key: upload.Key,
+  }
+}
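For illustration, a small sketch of calling the shared prepareUpload helper. The file object shape mirrors what the controllers pass; the bucket name, paths, and values are hypothetical, and s3 is assumed to be an already-credentialed AWS.S3 instance:

const AWS = require("aws-sdk")
const { prepareUpload } = require("./utils")

async function uploadLogo(appId) {
  // hypothetical bucket; in the real flow the bucket comes from the preDeployment verification
  const s3 = new AWS.S3({ params: { Bucket: "my-app-bucket" } })
  const result = await prepareUpload({
    file: { path: "/tmp/logo.png", name: "logo.png", size: 2048, type: "image/png" },
    s3Key: `assets/${appId}/attachments/logo.png`,
    metadata: { appId },
    s3,
  })
  // result: { size, name, extension, url, key }
  return result
}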
@@ -6,7 +6,7 @@ const fetch = require("node-fetch")
 const fs = require("fs-extra")
 const uuid = require("uuid")
 const AWS = require("aws-sdk")
-const { prepareUploadForS3 } = require("../deploy/aws")
+const { prepareUpload } = require("../deploy/utils")
 const handlebars = require("handlebars")
 const {
   budibaseAppsDir,
@@ -54,7 +54,7 @@ exports.uploadFile = async function(ctx) {
   const fileExtension = [...file.name.split(".")].pop()
   const processedFileName = `${uuid.v4()}.${fileExtension}`

-  return prepareUploadForS3({
+  return prepareUpload({
     file,
     s3Key: `assets/${ctx.user.appId}/attachments/${processedFileName}`,
     s3,
@@ -235,5 +235,5 @@ exports.serveComponentLibrary = async function(ctx) {
     return
   }

-  await send(ctx, "/index.js", { root: componentLibraryPath })
+  await send(ctx, "/awsDeploy.js", { root: componentLibraryPath })
 }
@@ -1,5 +1,6 @@
 const env = require("../environment")
 const { DocumentTypes, SEPARATOR } = require("../db/utils")
+const fs = require("fs")

 const APP_PREFIX = DocumentTypes.APP + SEPARATOR
@@ -74,3 +75,21 @@ exports.setCookie = (ctx, name, value) => {
 exports.isClient = ctx => {
   return ctx.headers["x-budibase-type"] === "client"
 }
+
+/**
+ * Recursively walk a directory tree and execute a callback on all files.
+ * @param {String} dirPath - Directory to traverse
+ * @param {Function} callback - callback to execute on files
+ */
+exports.walkDir = (dirPath, callback) => {
+  for (let filename of fs.readdirSync(dirPath)) {
+    const filePath = `${dirPath}/${filename}`
+    const stat = fs.lstatSync(filePath)
+
+    if (stat.isFile()) {
+      callback(filePath)
+    } else {
+      exports.walkDir(filePath, callback)
+    }
+  }
+}
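A quick usage sketch of the relocated walkDir helper (the directory path is hypothetical):

const { walkDir } = require("../../../utilities")

const files = []
walkDir("/path/to/app/assets", filePath => files.push(filePath))
// files now contains every file path under the tree, found recursively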