From c85bc88bf9099d2e4f9082e5bb2baff00fedeaa9 Mon Sep 17 00:00:00 2001
From: Martin McKeaveney
Date: Mon, 16 Sep 2024 11:54:04 +0100
Subject: [PATCH] draft of openai impl

---
 packages/backend-core/src/configs/configs.ts |  9 ++++
 .../server/src/automations/steps/openai.ts   | 51 ++++++++++++++-----
 2 files changed, 47 insertions(+), 13 deletions(-)

diff --git a/packages/backend-core/src/configs/configs.ts b/packages/backend-core/src/configs/configs.ts
index 0d189e3f7d..00ccee8c4e 100644
--- a/packages/backend-core/src/configs/configs.ts
+++ b/packages/backend-core/src/configs/configs.ts
@@ -1,4 +1,6 @@
 import {
+  AIConfig,
+  AIInnerConfig,
   Config,
   ConfigType,
   GoogleConfig,
@@ -254,3 +256,10 @@ export async function getSCIMConfig(): Promise<SCIMInnerConfig | undefined> {
   const config = await getConfig<SCIMConfig>(ConfigType.SCIM)
   return config?.config
 }
+
+// AI
+export async function getAIConfig(): Promise<AIInnerConfig | undefined> {
+  const config = await getConfig<AIConfig>(ConfigType.AI)
+  return config?.config
+}
+
diff --git a/packages/server/src/automations/steps/openai.ts b/packages/server/src/automations/steps/openai.ts
index 1c148b2e73..d8017d0ceb 100644
--- a/packages/server/src/automations/steps/openai.ts
+++ b/packages/server/src/automations/steps/openai.ts
@@ -10,6 +10,7 @@ import {
 } from "@budibase/types"
 import { env } from "@budibase/backend-core"
 import * as automationUtils from "../automationUtils"
+import * as pro from "@budibase/pro"

 enum Model {
   GPT_35_TURBO = "gpt-3.5-turbo",
@@ -60,6 +61,23 @@ export const definition: AutomationStepDefinition = {
   },
 }

+async function legacyOpenAIPrompt(inputs: OpenAIStepInputs) {
+  const openai = new OpenAI({
+    apiKey: env.OPENAI_API_KEY,
+  })
+
+  const completion = await openai.chat.completions.create({
+    model: inputs.model,
+    messages: [
+      {
+        role: "user",
+        content: inputs.prompt,
+      },
+    ],
+  })
+  return completion?.choices[0]?.message?.content
+}
+
 export async function run({
   inputs,
 }: {
@@ -81,20 +99,27 @@ export async function run({
   }

   try {
-    const openai = new OpenAI({
-      apiKey: env.OPENAI_API_KEY,
-    })
+    let response
+    const customConfigsEnabled = await pro.features.isAICustomConfigsEnabled()
+    const budibaseAIEnabled = await pro.features.isBudibaseAIEnabled()

-    const completion = await openai.chat.completions.create({
-      model: inputs.model,
-      messages: [
-        {
-          role: "user",
-          content: inputs.prompt,
-        },
-      ],
-    })
-    const response = completion?.choices[0]?.message?.content
+    if (budibaseAIEnabled || customConfigsEnabled) {
+      // Enterprise installs can have custom AI configs; if custom configs
+      // are enabled, use them full stop. Don't use their Budibase AI credits
+      // unless the request goes through the Budibase AI configuration.
+      // TODO: grab the config from the database (maybe wrap this in the pro AI module)
+      // TODO: pass it into the model to execute the prompt
+
+      // TODO: if in cloud and Budibase AI is enabled, use the standard
+      // Budibase AI config and make sure it uses their credits; this should
+      // be handled by the LLM wrapper in pro
+      const llm = new pro.ai.LLMWrapper()
+      await llm.init()
+      response = await llm.run(inputs.prompt)
+    } else {
+      // fall back to the env-variable based default for backwards compat
+      response = await legacyOpenAIPrompt(inputs)
+    }

     return {
       response,
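
Review note: the TODOs above defer all provider handling to pro.ai.LLMWrapper,
which this draft only touches via init() and run(prompt). Below is a minimal
sketch of how that wrapper could satisfy the TODOs. It is not the real
@budibase/pro implementation: it assumes backend-core re-exports its configs
module (as it does for the other config getters), and the fields read off the
stored config (apiKey, defaultModel) are placeholder names.

    // Hypothetical sketch only, not the actual @budibase/pro wrapper.
    // The AIInnerConfig field names here (apiKey, defaultModel) are assumptions.
    import { OpenAI } from "openai"
    import { configs, env } from "@budibase/backend-core"

    interface AssumedAIInnerConfig {
      apiKey?: string
      defaultModel?: string
    }

    export class LLMWrapper {
      private client?: OpenAI
      private model = "gpt-3.5-turbo"

      // Prefer a custom AI config stored in the database (the enterprise path),
      // falling back to the platform OpenAI key so Budibase AI still works.
      async init() {
        const config = (await configs.getAIConfig()) as
          | AssumedAIInnerConfig
          | undefined
        const apiKey = config?.apiKey || env.OPENAI_API_KEY
        if (!apiKey) {
          throw new Error("No AI config found and OPENAI_API_KEY is not set")
        }
        if (config?.defaultModel) {
          this.model = config.defaultModel
        }
        this.client = new OpenAI({ apiKey })
      }

      async run(prompt: string): Promise<string | undefined> {
        if (!this.client) {
          throw new Error("init() must be called before run()")
        }
        const completion = await this.client.chat.completions.create({
          model: this.model,
          messages: [{ role: "user", content: prompt }],
        })
        return completion?.choices[0]?.message?.content ?? undefined
      }
    }

Keeping credit accounting and config lookup inside the wrapper, as the TODOs
suggest, means the automation step itself never needs to know which provider
or billing path served the prompt.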
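For completeness, a sketch of how server code could consume the new
backend-core helper; again this assumes the configs module is re-exported
from @budibase/backend-core, and hasCustomAIConfig is a hypothetical caller,
not part of this patch:

    import { configs } from "@budibase/backend-core"

    // Returns true when the tenant has stored a custom AI configuration.
    // Sketch only: the real gating above also consults pro feature flags.
    async function hasCustomAIConfig(): Promise<boolean> {
      const aiConfig = await configs.getAIConfig()
      return aiConfig !== undefined
    }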