draft of openai impl
parent 49a4e252c6
commit c85bc88bf9
@@ -1,4 +1,6 @@
 import {
+  AIConfig,
+  AIInnerConfig,
   Config,
   ConfigType,
   GoogleConfig,
@@ -254,3 +256,10 @@ export async function getSCIMConfig(): Promise<SCIMInnerConfig | undefined> {
   const config = await getConfig<SCIMConfig>(ConfigType.SCIM)
   return config?.config
 }
+
+// AI
+export async function getAIConfig(): Promise<AIInnerConfig | undefined> {
+  const config = await getConfig<AIConfig>(ConfigType.AI)
+  return config?.config
+}
+
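The two hunks above extend the shared config helpers: the new getAIConfig mirrors getSCIMConfig, fetching the stored document for ConfigType.AI and unwrapping its config payload. The real AIConfig and AIInnerConfig definitions live in @budibase/types; the sketch below only captures the wrapper/payload relationship the getter relies on, and every field inside the inner config is a hypothetical placeholder, not the actual schema.

// TypeScript sketch of the shape implied by getAIConfig. Only the
// AIConfig -> config -> AIInnerConfig nesting is taken from the diff;
// the provider fields are illustrative assumptions.
interface AIProviderDetails {
  provider?: string     // hypothetical, e.g. "OpenAI"
  apiKey?: string       // hypothetical
  defaultModel?: string // hypothetical
}

type AIInnerConfig = Record<string, AIProviderDetails> // hypothetical layout

interface AIConfig {
  config: AIInnerConfig // getAIConfig returns config?.config
}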
@@ -10,6 +10,7 @@ import {
 } from "@budibase/types"
 import { env } from "@budibase/backend-core"
 import * as automationUtils from "../automationUtils"
+import * as pro from "@budibase/pro"
 
 enum Model {
   GPT_35_TURBO = "gpt-3.5-turbo",
@@ -60,6 +61,23 @@ export const definition: AutomationStepDefinition = {
   },
 }
+
+async function legacyOpenAIPrompt(inputs: OpenAIStepInputs) {
+  const openai = new OpenAI({
+    apiKey: env.OPENAI_API_KEY,
+  })
+
+  const completion = await openai.chat.completions.create({
+    model: inputs.model,
+    messages: [
+      {
+        role: "user",
+        content: inputs.prompt,
+      },
+    ],
+  })
+  return completion?.choices[0]?.message?.content
+}
 
 export async function run({
   inputs,
 }: {
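legacyOpenAIPrompt above preserves the step's pre-existing behaviour: a direct OpenAI SDK call authenticated via the OPENAI_API_KEY environment variable. It reads exactly two fields from the step inputs, so the input type can be pictured roughly as below; the real OpenAIStepInputs comes from @budibase/types and may well carry more fields than this.

// Rough shape of the step inputs, inferred from the two fields
// legacyOpenAIPrompt reads; the actual type lives in @budibase/types.
interface OpenAIStepInputs {
  prompt: string // user-authored prompt, sent as a single chat message
  model: Model   // a Model enum value, e.g. Model.GPT_35_TURBO
}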
@@ -81,20 +99,27 @@ export async function run({
   }
 
   try {
-    const openai = new OpenAI({
-      apiKey: env.OPENAI_API_KEY,
-    })
-
-    const completion = await openai.chat.completions.create({
-      model: inputs.model,
-      messages: [
-        {
-          role: "user",
-          content: inputs.prompt,
-        },
-      ],
-    })
-    const response = completion?.choices[0]?.message?.content
+    let response
+    const customConfigsEnabled = await pro.features.isAICustomConfigsEnabled()
+    const budibaseAIEnabled = await pro.features.isBudibaseAIEnabled()
+
+    if (budibaseAIEnabled || customConfigsEnabled) {
+      // Enterprise has custom configs
+      // if custom configs are enabled full stop
+      // Don't use their budibase AI credits, unless it uses the budibase AI configuration
+      // TODO: grab the config from the database (maybe wrap this in the pro AI module)
+      // TODO: pass it into the model to execute the prompt
+
+      // TODO: if in cloud and budibaseAI is enabled, use the standard budibase AI config
+      // Make sure it uses their credits
+      // Should be handled in the LLM wrapper in pro
+      const llm = new pro.ai.LLMWrapper()
+      await llm.init()
+      response = await llm.run(inputs.prompt)
+    } else {
+      // fallback to the default that uses the environment variable for backwards compat
+      response = await legacyOpenAIPrompt(inputs)
+    }
 
     return {
       response,
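The new run path hinges on two pieces of @budibase/pro that this commit does not define: the pro.features licence checks and the pro.ai.LLMWrapper class. Since @budibase/pro is a closed-source package, the declaration below is only the surface inferred from the call sites in the hunk above; every signature is an assumption, not the package's published API.

// Assumed surface of @budibase/pro, reconstructed purely from the
// call sites in this diff. Hypothetical, for orientation only.
declare namespace pro {
  namespace features {
    // Licence gates deciding whether the new code path runs
    function isAICustomConfigsEnabled(): Promise<boolean>
    function isBudibaseAIEnabled(): Promise<boolean>
  }
  namespace ai {
    class LLMWrapper {
      // Presumably resolves the stored AI config (see getAIConfig
      // above) and selects the matching provider and credentials
      init(): Promise<void>
      // Runs the prompt against the configured model and returns
      // the completion text
      run(prompt: string): Promise<string | undefined>
    }
  }
}

Read with that shape in mind, the branching is straightforward: licensed installs (Budibase AI or custom AI configs) go through the pro wrapper, while everything else falls back to legacyOpenAIPrompt and the OPENAI_API_KEY environment variable for backwards compatibility.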