Merge branch 'master' into sort-table-names

commit a1dd7c58c0
Author: Andrew Kingston (committed by GitHub)
Date: 2024-12-04 12:32:24 +00:00
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
8 changed files with 18 additions and 14 deletions

@@ -1,6 +1,6 @@
 {
   "$schema": "node_modules/lerna/schemas/lerna-schema.json",
-  "version": "3.2.23",
+  "version": "3.2.24",
   "npmClient": "yarn",
   "concurrency": 20,
   "command": {

@@ -1 +1 @@
-Subproject commit e60f4b1b364fd49d2bb082f298757f83cb2032f0
+Subproject commit 7b8789efd940d9f8e5be9927243b19f07361c445

@@ -48,7 +48,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => `Mock LLM Response`),
         buildPromptFromAIOperation: jest.fn(),
       }),

@@ -52,7 +52,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => `Mock LLM Response`),
         buildPromptFromAIOperation: jest.fn(),
       }),
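
Both test files above update the @budibase/pro mock the same way: the mocked wrapper now exposes an llm property instead of an initialised flag, so the new truthiness guard (llmWrapper?.llm) still routes tests through the mocked path. A minimal standalone sketch of why the shape matters, using a hypothetical LLMWrapper type as a stand-in for the real @budibase/pro type, which this diff does not show:

import { jest, expect, test } from "@jest/globals"

// Hypothetical wrapper shape mirroring the mocks above.
type LLMWrapper = {
  llm?: object
  run: (prompt: string) => Promise<string>
}

test("guard falls back to the legacy path when llm is unset", async () => {
  const legacy = jest.fn(async () => "legacy response")
  const wrapper: LLMWrapper = { llm: undefined, run: jest.fn(async () => "llm") }
  // Same ternary shape as the production guard: only call the wrapper
  // when its inner client exists.
  const response = wrapper.llm ? await wrapper.run("hi") : await legacy()
  expect(response).toBe("legacy response")
  expect(wrapper.run).not.toHaveBeenCalled()
})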

@@ -106,13 +106,15 @@ export async function run({
     (await features.flags.isEnabled(FeatureFlag.BUDIBASE_AI)) &&
     (await pro.features.isBudibaseAIEnabled())
 
-  let llm
+  let llmWrapper
   if (budibaseAIEnabled || customConfigsEnabled) {
-    llm = await pro.ai.LargeLanguageModel.forCurrentTenant(inputs.model)
+    llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+      inputs.model
+    )
   }
 
-  response = llm?.initialised
-    ? await llm.run(inputs.prompt)
+  response = llmWrapper?.llm
+    ? await llmWrapper.run(inputs.prompt)
     : await legacyOpenAIPrompt(inputs)
 
   return {
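
The change above replaces the boolean initialised flag with a truthiness check on the wrapper's inner llm client: the legacy OpenAI path is taken both when no wrapper was created (the feature checks failed) and when the wrapper has no configured client. A minimal sketch of that guard pattern, with hypothetical types (the real LargeLanguageModel wrapper lives in @budibase/pro and its exact shape is not part of this diff):

// Hypothetical shape, for illustration only.
interface LLMWrapper {
  llm?: object // set once a concrete client has been configured
  run(prompt: string): Promise<string>
}

async function runPrompt(
  wrapper: LLMWrapper | undefined,
  prompt: string,
  legacyFallback: (prompt: string) => Promise<string>
): Promise<string> {
  // Optional chaining covers the "never created" case; the llm check
  // covers "created but not configured".
  return wrapper?.llm ? wrapper.run(prompt) : legacyFallback(prompt)
}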

@@ -27,7 +27,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: jest.fn().mockImplementation(() => ({
-        initialised: true,
+        llm: {},
         init: jest.fn(),
         run: jest.fn(),
       })),

@@ -18,7 +18,7 @@ jest.mock("@budibase/pro", () => ({
   ai: {
     LargeLanguageModel: {
       forCurrentTenant: async () => ({
-        initialised: true,
+        llm: {},
         run: jest.fn(() => "response from LLM"),
         buildPromptFromAIOperation: buildPromptMock,
       }),

@@ -126,8 +126,10 @@ export async function processAIColumns<T extends Row | Row[]>(
     const numRows = Array.isArray(inputRows) ? inputRows.length : 1
     span?.addTags({ table_id: table._id, numRows })
     const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-    const llm = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
-    if (rows && llm.initialised) {
+    const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant(
+      "gpt-4o-mini"
+    )
+    if (rows && llmWrapper.llm) {
       // Ensure we have snippet context
       await context.ensureSnippetContext()
 
@@ -151,14 +153,14 @@
           }
         }
 
-        const prompt = llm.buildPromptFromAIOperation({
+        const prompt = llmWrapper.buildPromptFromAIOperation({
          schema: aiSchema,
           row,
         })
 
         return tracer.trace("processAIColumn", {}, async span => {
           span?.addTags({ table_id: table._id, column })
-          const llmResponse = await llm.run(prompt!)
+          const llmResponse = await llmWrapper.run(prompt!)
           return {
             ...row,
             [column]: llmResponse,
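
processAIColumns applies the same rename: rows are only enriched when the wrapper's inner llm client exists, and they pass through untouched otherwise. A condensed, illustrative reduction of the per-row flow under that guard (simplified types; buildPromptFromAIOperation and run are the wrapper methods from the diff, everything else is assumed for the sketch):

type Row = Record<string, unknown>

// Hypothetical wrapper type; the real one lives in @budibase/pro.
type AIWrapper = {
  llm?: object
  buildPromptFromAIOperation(op: { schema: unknown; row: Row }): string
  run(prompt: string): Promise<string>
}

async function fillAIColumn(
  wrapper: AIWrapper,
  rows: Row[],
  column: string,
  aiSchema: unknown
): Promise<Row[]> {
  if (!wrapper.llm) return rows // no configured client: rows unchanged
  return Promise.all(
    rows.map(async row => {
      const prompt = wrapper.buildPromptFromAIOperation({ schema: aiSchema, row })
      return { ...row, [column]: await wrapper.run(prompt) }
    })
  )
}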