Changing how invalidation is checked for the CloudFront cache, making sure that we don't lose the state of the invalidation and can check it fully in the background.
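In outline: rather than blocking the deploy while polling CloudFront, the upload step now returns the invalidation ID, which is stored on the pending deployment record and re-checked later. A minimal sketch of that pattern with the AWS SDK v2, where the CallerReference and asset path are illustrative values only, not the exact ones the codebase uses:

const AWS = require("aws-sdk")

// Start a CloudFront invalidation and hand back its ID instead of waiting on it.
// The CallerReference and path below are illustrative values only.
async function startInvalidation(distributionId, appId) {
  const cf = new AWS.CloudFront({})
  const resp = await cf
    .createInvalidation({
      DistributionId: distributionId,
      InvalidationBatch: {
        CallerReference: `${appId}-${Date.now()}`,
        Paths: { Quantity: 1, Items: [`/assets/${appId}/*`] },
      },
    })
    .promise()
  // the ID is what gets persisted on the PENDING deployment record
  return resp.Invalidation.Id
}

// Later (e.g. whenever deployment history is fetched) ask CloudFront if it finished.
async function isInvalidationComplete(distributionId, invalidationId) {
  if (distributionId == null || invalidationId == null) {
    return false
  }
  const cf = new AWS.CloudFront({})
  const resp = await cf
    .getInvalidation({ DistributionId: distributionId, Id: invalidationId })
    .promise()
  return resp.Invalidation.Status === "Completed"
}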

mike12345567 2020-10-20 15:06:34 +01:00
parent ce5dca72b4
commit 69743faa0d
3 changed files with 105 additions and 63 deletions

View File

@@ -19,7 +19,7 @@
hour12: true,
},
}
const POLL_INTERVAL = 1000
const POLL_INTERVAL = 5000
export let appId

View File

@@ -1,6 +1,5 @@
const fs = require("fs")
const { join } = require("../../../utilities/centralPath")
let { wait } = require("../../../utilities")
const AWS = require("aws-sdk")
const fetch = require("node-fetch")
const uuid = require("uuid")
@@ -8,12 +7,6 @@ const { budibaseAppsDir } = require("../../../utilities/budibaseDir")
const PouchDB = require("../../../db")
const environment = require("../../../environment")
const MAX_INVALIDATE_WAIT_MS = 120000
const INVALIDATE_WAIT_PERIODS_MS = 5000
// export so main deploy functions can use too
exports.MAX_INVALIDATE_WAIT_MS = MAX_INVALIDATE_WAIT_MS
async function invalidateCDN(cfDistribution, appId) {
const cf = new AWS.CloudFront({})
const resp = await cf
@@ -28,28 +21,24 @@ async function invalidateCDN(cfDistribution, appId) {
},
})
.promise()
let totalWaitTimeMs = 0
let complete = false
do {
try {
const state = await cf
.getInvalidation({
DistributionId: cfDistribution,
Id: resp.Invalidation.Id,
})
.promise()
if (state.Invalidation.Status === "Completed") {
complete = true
}
} catch (err) {
console.log()
}
await wait(INVALIDATE_WAIT_PERIODS_MS)
totalWaitTimeMs += INVALIDATE_WAIT_PERIODS_MS
} while (totalWaitTimeMs <= MAX_INVALIDATE_WAIT_MS && !complete)
if (!complete) {
throw "Unable to invalidate old app version"
return resp.Invalidation.Id
}
exports.isInvalidationComplete = async function(
distributionId,
invalidationId
) {
if (distributionId == null || invalidationId == null) {
return false
}
const cf = new AWS.CloudFront({})
const resp = await cf
.getInvalidation({
DistributionId: distributionId,
Id: invalidationId,
})
.promise()
return resp.Invalidation.Status === "Completed"
}
exports.updateDeploymentQuota = async function(quota) {
@@ -102,6 +91,18 @@ exports.verifyDeployment = async function({ instanceId, appId, quota }) {
}
const json = await response.json()
if (json.errors) {
throw new Error(json.errors)
}
// set credentials here, means any time we're verified we're ready to go
if (json.credentials) {
AWS.config.update({
accessKeyId: json.credentials.AccessKeyId,
secretAccessKey: json.credentials.SecretAccessKey,
sessionToken: json.credentials.SessionToken,
})
}
return json
}
@@ -157,17 +158,10 @@ exports.prepareUploadForS3 = prepareUploadForS3
exports.uploadAppAssets = async function({
appId,
instanceId,
credentials,
bucket,
cfDistribution,
accountId,
}) {
AWS.config.update({
accessKeyId: credentials.AccessKeyId,
secretAccessKey: credentials.SecretAccessKey,
sessionToken: credentials.SessionToken,
})
const s3 = new AWS.S3({
params: {
Bucket: bucket,
@@ -225,7 +219,7 @@ exports.uploadAppAssets = async function({
try {
await Promise.all(uploads)
await invalidateCDN(cfDistribution, appId)
return await invalidateCDN(cfDistribution, appId)
} catch (err) {
console.error("Error uploading budibase app assets to s3", err)
throw err

View File

@@ -4,17 +4,67 @@ const {
uploadAppAssets,
verifyDeployment,
updateDeploymentQuota,
MAX_INVALIDATE_WAIT_MS,
isInvalidationComplete,
} = require("./aws")
const { DocumentTypes, SEPARATOR, UNICODE_MAX } = require("../../../db/utils")
const newid = require("../../../db/newid")
// the max time we can wait for an invalidation to complete before considering it failed
const MAX_PENDING_TIME_MS = 30 * 60000
const DeploymentStatus = {
SUCCESS: "SUCCESS",
PENDING: "PENDING",
FAILURE: "FAILURE",
}
// checks that deployments are in a good state, any pending will be updated
async function checkAllDeployments(deployments, user) {
let updated = false
function update(deployment, status) {
deployment.status = status
delete deployment.invalidationId
delete deployment.cfDistribution
updated = true
}
for (let deployment of Object.values(deployments.history)) {
// check that no deployments have crashed etc and are now stuck
if (
deployment.status === DeploymentStatus.PENDING &&
Date.now() - deployment.updatedAt > MAX_PENDING_TIME_MS
) {
update(deployment, DeploymentStatus.FAILURE)
}
// if pending but not past failure point need to update them
else if (deployment.status === DeploymentStatus.PENDING) {
let complete = false
try {
complete = await isInvalidationComplete(
deployment.cfDistribution,
deployment.invalidationId
)
} catch (err) {
// system may have restarted, need to re-verify
if (err != null && err.code === "InvalidClientTokenId") {
await verifyDeployment({
...user,
quota: deployment.quota,
})
complete = await isInvalidationComplete(
deployment.cfDistribution,
deployment.invalidationId
)
}
}
if (complete) {
update(deployment, DeploymentStatus.SUCCESS)
}
}
}
return { updated, deployments }
}
function replicate(local, remote) {
return new Promise((resolve, reject) => {
const replication = local.sync(remote)
@@ -102,7 +152,7 @@ async function storeLocalDeploymentHistory(deployment) {
async function deployApp({ instanceId, appId, clientId, deploymentId }) {
try {
const instanceQuota = await getCurrentInstanceQuota(instanceId)
const credentials = await verifyDeployment({
const verification = await verifyDeployment({
instanceId,
appId,
quota: instanceQuota,
@@ -110,31 +160,36 @@ async function deployApp({ instanceId, appId, clientId, deploymentId }) {
console.log(`Uploading assets for appID ${appId} to s3..`)
if (credentials.errors) throw new Error(credentials.errors)
await uploadAppAssets({ clientId, appId, instanceId, ...credentials })
const invalidationId = await uploadAppAssets({
clientId,
appId,
instanceId,
...verification,
})
// replicate the DB to the couchDB cluster in prod
console.log("Replicating local PouchDB to remote..")
await replicateCouch({
instanceId,
clientId,
session: credentials.couchDbSession,
session: verification.couchDbSession,
})
await updateDeploymentQuota(credentials.quota)
await updateDeploymentQuota(verification.quota)
await storeLocalDeploymentHistory({
_id: deploymentId,
instanceId,
quota: credentials.quota,
status: "SUCCESS",
invalidationId,
cfDistribution: verification.cfDistribution,
quota: verification.quota,
status: DeploymentStatus.PENDING,
})
} catch (err) {
await storeLocalDeploymentHistory({
_id: deploymentId,
instanceId,
status: "FAILURE",
status: DeploymentStatus.FAILURE,
err: err.message,
})
throw new Error(`Deployment Failed: ${err.message}`)
@@ -145,21 +200,14 @@ exports.fetchDeployments = async function(ctx) {
try {
const db = new PouchDB(ctx.user.instanceId)
const deploymentDoc = await db.get("_local/deployments")
// check that no deployments have crashed etc and are now stuck
let changed = false
for (let deployment of Object.values(deploymentDoc.history)) {
if (
deployment.status === DeploymentStatus.PENDING &&
Date.now() - deployment.updatedAt > MAX_INVALIDATE_WAIT_MS
) {
deployment.status = DeploymentStatus.FAILURE
changed = true
}
const { updated, deployments } = await checkAllDeployments(
deploymentDoc,
ctx.user
)
if (updated) {
await db.put(deployments)
}
if (changed) {
await db.put(deploymentDoc)
}
ctx.body = Object.values(deploymentDoc.history).reverse()
ctx.body = Object.values(deployments.history).reverse()
} catch (err) {
ctx.body = []
}
@@ -185,10 +233,10 @@ exports.deployApp = async function(ctx) {
const deployment = await storeLocalDeploymentHistory({
instanceId: ctx.user.instanceId,
appId: ctx.user.appId,
status: "PENDING",
status: DeploymentStatus.PENDING,
})
deployApp({
await deployApp({
...ctx.user,
clientId,
deploymentId: deployment._id,