From b18aa5e9cd98309aeaeac688275f60051c63c5ec Mon Sep 17 00:00:00 2001
From: Dimitri Glazkov
Date: Tue, 14 Jan 2025 16:23:30 -0800
Subject: [PATCH] [a2] Add "Researcher", "Image Generator" and other changes.

- **Point a2 to npm-proper registry.**
- **Teach a2 to push to board server.**
- **Teach describers to refer to main graph, not sub-graph.**
- **Teach Port collector to deal with empty schemas.**
- **Add a few more exports.**
- **Fix a bug where updating subgraph metadata also updates main graph metadata.**
- **Rename `Agent` to `Text Generator`.**
- **docs(changeset): Add "Researcher", "Image Generator" and other changes.**
- **Word-smithing.**
---
 .changeset/odd-buses-add.md                      |   5 +
 package-lock.json                                |  33 +--
 ...oard.bgl.json => a2-no-board-old.bgl.json}    |   0
 packages/a2/bgl/a2.bgl.json                      | 140 ++++++++++--
 packages/a2/bgl/agent-workbench.bgl.json         | 209 ++++++++++++++++++
 packages/a2/package.json                         |  11 +-
 packages/a2/scripts/push.ts                      |  56 +++++
 packages/board-server/package.json               |   2 +-
 packages/breadboard/src/handler.ts               |   3 +-
 .../src/inspector/graph/describer-manager.ts     |   4 +-
 packages/breadboard/src/inspector/graph/ports.ts |   5 +-
 packages/breadboard/src/utils/schema-differ.ts   |  17 +-
 packages/discovery-types/package.json            |   2 +-
 packages/example-board-server/package.json       |   2 +-
 packages/filesystem-board-server/package.json    |   2 +-
 packages/idb-board-server/package.json           |   2 +-
 packages/manifest/bbm.schema.json                |   4 +
 packages/mod-playground/package.json             |   2 +-
 packages/remote-board-server/package.json        |   2 +-
 packages/schema/breadboard.schema.json           |   4 +
 packages/shared-ui/package.json                  |   2 +-
 packages/types/src/graph-descriptor.ts           |  10 +
 packages/visual-editor/package.json              |   2 +-
 packages/visual-editor/src/index.ts              |   3 +-
 24 files changed, 464 insertions(+), 58 deletions(-)
 create mode 100644 .changeset/odd-buses-add.md
 rename packages/a2/bgl/{a2-no-board.bgl.json => a2-no-board-old.bgl.json} (100%)
 create mode 100644 packages/a2/bgl/agent-workbench.bgl.json
 create mode 100644 packages/a2/scripts/push.ts

diff --git a/.changeset/odd-buses-add.md b/.changeset/odd-buses-add.md
new file mode 100644
index 0000000000..f5c5333a62
--- /dev/null
+++ b/.changeset/odd-buses-add.md
@@ -0,0 +1,5 @@
+---
+"@breadboard-ai/a2": minor
+---
+
+Add "Researcher", "Image Generator" and other changes.
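The "Teach a2 to push to board server" bullet corresponds to the new `packages/a2/scripts/push.ts` (56 lines in the diffstat above), run via the `tsx`, `dotenv`, and `@google-cloud/firestore` devDependencies this patch adds to the a2 package in the lock file below. The script itself falls outside this excerpt, so the following is only a sketch of the general shape such a push could take; the Firestore collection layout, document fields, and environment variable names are all assumptions for illustration, not the actual implementation:

```ts
// Hypothetical sketch only, not the actual scripts/push.ts from this patch:
// the collection path, document fields, and env var names are assumptions.
import { readFile } from "node:fs/promises";
import { basename } from "node:path";
import { config } from "dotenv";
import { Firestore } from "@google-cloud/firestore";

config(); // load service-account credentials and settings from a local .env

async function push(file: string): Promise<void> {
  // A .bgl.json board is plain JSON: { title, description, nodes, edges, ... }
  const graph = JSON.parse(await readFile(file, "utf-8"));
  const db = new Firestore();
  const user = process.env.BOARD_SERVER_USER ?? "guest"; // assumed env var
  const name = basename(file); // e.g. "a2.bgl.json"
  // Assumed layout: one Firestore document per board under a user's workspace.
  await db
    .doc(`workspaces/${user}/boards/${name}`)
    .set({ title: graph.title, graph: JSON.stringify(graph) });
  console.log(`pushed ${name}`);
}

// Usage: npx tsx scripts/push.ts bgl/a2.bgl.json
const file = process.argv[2];
if (!file) throw new Error("usage: push.ts <path-to-board.bgl.json>");
push(file).catch((err) => {
  console.error(err);
  process.exit(1);
});
```

Whatever the real store, the design point holds: a BGL board is plain JSON, so publishing one amounts to serializing the graph into whatever backend the board server reads from.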
diff --git a/package-lock.json b/package-lock.json index 06546a3e07..c2752868b7 100644 --- a/package-lock.json +++ b/package-lock.json @@ -2721,9 +2721,10 @@ } }, "node_modules/@google-cloud/firestore": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/@google-cloud/firestore/-/firestore-7.10.0.tgz", - "integrity": "sha512-VFNhdHvfnmqcHHs6YhmSNHHxQqaaD64GwiL0c+e1qz85S8SWZPC2XFRf8p9yHRTF40Kow424s1KBU9f0fdQa+Q==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@google-cloud/firestore/-/firestore-7.11.0.tgz", + "integrity": "sha512-88uZ+jLsp1aVMj7gh3EKYH1aulTAMFAp8sH/v5a9w8q8iqSG27RiWLoxSAFr/XocZ9hGiWH1kEnBw+zl3xAgNA==", + "license": "Apache-2.0", "dependencies": { "@opentelemetry/api": "^1.3.0", "fast-deep-equal": "^3.1.1", @@ -9098,7 +9099,9 @@ "license": "BSD-2-Clause" }, "node_modules/dotenv": { - "version": "16.4.5", + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", "license": "BSD-2-Clause", "engines": { "node": ">=12" @@ -23356,6 +23359,7 @@ "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.19.2.tgz", "integrity": "sha512-pOUl6Vo2LUq/bSa8S5q7b91cgNSjctn9ugq/+Mvow99qW6x/UZYwzxy/3NmqoT66eHYfCVvFvACC58UBPFf28g==", "devOptional": true, + "license": "MIT", "dependencies": { "esbuild": "~0.23.0", "get-tsconfig": "^4.7.5" @@ -25478,9 +25482,12 @@ "version": "0.1.0", "license": "Apache-2.0", "devDependencies": { + "@google-cloud/firestore": "^7.11.0", "@types/node": "^22.0.0", "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", + "dotenv": "^16.4.7", + "tsx": "^4.19.2", "typescript": "^5.6.3" } }, @@ -26090,7 +26097,7 @@ "@breadboard-ai/google-drive-kit": "0.4.1", "@breadboard-ai/jsandbox": "0.3.0", "@breadboard-ai/types": "0.4.0", - "@google-cloud/firestore": "^7.10.0", + "@google-cloud/firestore": "^7.11.0", "@google-cloud/secret-manager": "^5.6.0", "@google-cloud/storage": "^7.14.0", "@google-labs/breadboard": "^0.31.0", @@ -26598,7 +26605,7 @@ "version": "0.0.1", "license": "Apache-2.0", "dependencies": { - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "prettier": "^3.4.1" }, "devDependencies": { @@ -26633,7 +26640,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", @@ -26685,7 +26692,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", @@ -26835,7 +26842,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", @@ -26961,7 +26968,7 @@ "devDependencies": { "@esbuild-plugins/node-globals-polyfill": "^0.2.3", "@google-labs/tsconfig": "^0.0.1", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "typescript": "^5.6.3", "vite": "^6.0.2", "vite-plugin-full-reload": "^1.2.0", @@ -27155,7 +27162,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", @@ -27225,7 +27232,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": 
"^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", @@ -27857,7 +27864,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", diff --git a/packages/a2/bgl/a2-no-board.bgl.json b/packages/a2/bgl/a2-no-board-old.bgl.json similarity index 100% rename from packages/a2/bgl/a2-no-board.bgl.json rename to packages/a2/bgl/a2-no-board-old.bgl.json diff --git a/packages/a2/bgl/a2.bgl.json b/packages/a2/bgl/a2.bgl.json index 1166f41b0b..8d13e5583a 100644 --- a/packages/a2/bgl/a2.bgl.json +++ b/packages/a2/bgl/a2.bgl.json @@ -1,6 +1,6 @@ { "title": "A2", - "description": "The next-generation framework for AI systems", + "description": "Calls an LLM and so much more. Insert real description here.", "version": "0.0.1", "nodes": [], "edges": [], @@ -31,7 +31,7 @@ "metadata": { "title": "common", "source": { - "code": "/**\n * @fileoverview Common types and code\n */\n\nexport type UserInput = unknown;\n\nexport type AgentInputs = {\n /**\n * Whether (true) or not (false) the agent is allowed to chat with user.\n */\n chat: boolean;\n /**\n * The incoming conversation context.\n */\n context: LLMContent[];\n /**\n * Accumulated work context. This is the internal conversation, a result\n * of talking with the user, for instance.\n * This context is discarded at the end of interacting with the agent.\n */\n work: LLMContent[];\n /**\n * Agent's job description.\n */\n description?: LLMContent;\n /**\n * Type of the task.\n */\n type: \"introduction\" | \"work\";\n /**\n * The board URL of the model\n */\n model: string;\n /**\n * The default model that is passed along by the manager\n */\n defaultModel: string;\n /**\n * The tools that the worker can use\n */\n tools?: string[];\n};\n\nexport type AgentContext = AgentInputs & {\n /**\n * Accumulating list of user inputs\n */\n userInputs: UserInput[];\n};\n", + "code": "/**\n * @fileoverview Common types and code\n */\n\nexport type UserInput = unknown;\n\nexport type AgentInputs = {\n /**\n * Whether (true) or not (false) the agent is allowed to chat with user.\n */\n chat: boolean;\n /**\n * The incoming conversation context.\n */\n context: LLMContent[];\n /**\n * Accumulated work context. 
This is the internal conversation, a result\n * of talking with the user, for instance.\n * This context is discarded at the end of interacting with the agent.\n */\n work: LLMContent[];\n /**\n * Agent's job description.\n */\n description?: LLMContent;\n /**\n * Type of the task.\n */\n type: \"introduction\" | \"work\";\n /**\n * The board URL of the model\n */\n model: string;\n /**\n * The default model that is passed along by the manager\n */\n defaultModel: string;\n /**\n * The tools that the worker can use\n */\n tools?: string[];\n};\n\nexport type AgentContext = AgentInputs & {\n /**\n * A unique identifier for the session.\n * Currently used to have a persistent part separator across conversation context\n */\n id: string;\n /**\n * Accumulating list of user inputs\n */\n userInputs: UserInput[];\n};\n", "language": "typescript" }, "description": "Common types and code", @@ -63,11 +63,11 @@ } }, "entry": { - "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\nimport {} from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, chat, defaultModel, model, tools, description, type = \"work\", }) {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n chat,\n context,\n userInputs: [],\n defaultModel,\n model,\n description,\n tools,\n type,\n work: [],\n },\n };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n model: {\n type: \"object\",\n behavior: [\"board\"],\n title: \"Model\",\n },\n tools: {\n type: \"array\",\n items: {\n type: \"object\",\n behavior: [\"board\"],\n },\n title: \"Tools\",\n },\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Job Description\",\n description: \"A detailed list of skills and capabilities of this worker.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n chat: {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description: \"When checked, the agent will talk with the user, asking to review work, requesting additional information, etc.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n", + "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\nimport {} from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, chat, defaultModel, description, type = \"work\", }) {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Instruction\",\n description: \"Give the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.\",\n default: defaultLLMContent(),\n },\n context: 
{\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description: \"When checked, the model will chat with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description: \"When checked, the model will critique its output to improve quality in exchange for taking a little bit more time.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n", "metadata": { "title": "entry", "source": { - "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\n\nimport { type AgentContext, type AgentInputs } from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\n\nexport { invoke as default, describe };\n\ntype Outputs = {\n context: AgentContext;\n};\n\nasync function invoke({\n context,\n chat,\n defaultModel,\n model,\n tools,\n description,\n type = \"work\",\n}: AgentInputs): Promise {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n chat,\n context,\n userInputs: [],\n defaultModel,\n model,\n description,\n tools,\n type,\n work: [],\n },\n };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n model: {\n type: \"object\",\n behavior: [\"board\"],\n title: \"Model\",\n },\n tools: {\n type: \"array\",\n items: {\n type: \"object\",\n behavior: [\"board\"],\n },\n title: \"Tools\",\n },\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Job Description\",\n description:\n \"A detailed list of skills and capabilities of this worker.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n chat: {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description:\n \"When checked, the agent will talk with the user, asking to review work, requesting additional information, etc.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", + "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\n\nimport { type AgentContext, type AgentInputs } from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\n\nexport { invoke as default, describe };\n\ntype Outputs = {\n context: AgentContext;\n};\n\nasync function invoke({\n context,\n chat,\n defaultModel,\n description,\n type = \"work\",\n}: AgentInputs): Promise {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Instruction\",\n description:\n \"Give 
the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description:\n \"When checked, the model will chat with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description:\n \"When checked, the model will critique its output to improve quality in exchange for taking a little bit more time.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", "language": "typescript" }, "description": "Manages the entry point: describer, passing the inputs, etc.", @@ -123,11 +123,11 @@ } }, "worker-worker": { - "code": "/**\n * @fileoverview Performs assigned task. 
contents: work,\n systemInstruction: systemInstruction(responseManager.separator, description, mode, chat),\n safetySettings: defaultSafetySettings(),\n },\n };\n if (mode === \"call-tools\") {\n const product = await callTools(inputs, model, tools, retries);\n return { product };\n }\n else {\n const result = await generate(inputs, model, responseManager, retries);\n if (\"$error\" in result) {\n return result;\n }\n if (chat) {\n await output({\n schema: {\n type: \"object\",\n properties: {\n product: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Draft\",\n },\n message: {\n type: \"string\",\n title: \"Requesting feedback\",\n format: \"markdown\",\n },\n },\n },\n message: responseManager.epilog,\n product: result.product,\n });\n }\n return result;\n }\n}\n/**\n * Returns the system instruction based on on the provided parameters.\n */\nfunction systemInstruction(separator, description, mode, chat) {\n const preamble = `Here is your job description:\n${toText(description)}\n\n`;\n const chatOrConclude = chat\n ? `Finally, ask the user to provide feedback on your output as a friendly assistant might.`\n : `Finally, you briefly summarize what the work product was and how it fulfills the task.`;\n const postamble = `\n\nToday is ${new Date()}`;\n const outputInstruction = `\nYour response must consist of three parts.\nFirst, you briefly explain what the work product will be and why it fulfills the specified task.\nThen, you present the work product only, without any additional conversation or comments about your output.\n${chatOrConclude}\n\nThe parts must be separated by the ${separator} tag.\n`;\n switch (mode) {\n case \"summarize\":\n return toLLMContent(` \n${preamble}\nSummarize the research results to fulfill the specified task.\n${outputInstruction}\n${postamble}`);\n case \"call-tools\":\n return toLLMContent(`\n${preamble}\nGenerate multiple function calls to fulfill the specified task.\n${postamble}`);\n case \"generate\":\n return toLLMContent(`\n${preamble}\nProvide the response that fulfills the specified task.\n${outputInstruction}\n${postamble}`);\n }\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n work: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Work\",\n },\n description: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Job Description\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Work Product\",\n },\n },\n },\n };\n}\n", + "code": "/**\n * @fileoverview Performs assigned task. 
Part of the worker.\n */\nimport { toText, toLLMContent, ok, err } from \"./utils\";\nimport { defaultSafetySettings, } from \"./gemini\";\nimport { callGemini } from \"./gemini-client\";\nimport invokeBoard from \"@invoke\";\nimport output from \"@output\";\nexport { invoke as default, describe };\nfunction computeWorkMode(tools, summarize) {\n if (tools.length > 0) {\n return \"call-tools\";\n }\n if (summarize) {\n return \"summarize\";\n }\n return \"generate\";\n}\nclass StructuredResponse {\n id;\n prolog = \"\";\n epilog = \"\";\n body = \"\";\n response = undefined;\n constructor(id) {\n this.id = id;\n }\n get separator() {\n return ``;\n }\n parse(response) {\n const r = response;\n const part = r.candidates?.at(0)?.content?.parts?.at(0);\n if (!part || !(\"text\" in part)) {\n return { ok: false, error: \"No text in part\" };\n }\n this.response = r.candidates.at(0).content;\n const structure = part.text.split(this.separator);\n if (structure.length !== 3) {\n return { ok: false, error: \"The output must contain 3 parts\" };\n }\n this.prolog = structure[0];\n this.body = structure[1].trim();\n this.epilog = structure[2].trim();\n return { ok: true };\n }\n}\nasync function callTools(inputs, model, tools, retries) {\n inputs.body.tools = tools;\n inputs.body.toolConfig = {\n functionCallingConfig: {\n mode: \"ANY\",\n },\n };\n const response = await callGemini(inputs, model, (response) => {\n const r = response;\n if (r.candidates?.at(0)?.content)\n return { ok: true };\n return { ok: false, error: \"No content\" };\n }, retries);\n if (!ok(response)) {\n return toLLMContent(\"TODO: Handle Gemini error response\");\n }\n const r = response;\n return r.candidates?.at(0)?.content || toLLMContent(\"No valid response\");\n}\nasync function generate(inputs, model, responseManager, retries) {\n const response = await callGemini(inputs, model, (response) => {\n return responseManager.parse(response);\n }, retries);\n if (!ok(response)) {\n return response;\n }\n else {\n console.log(\"RESPONSE MANAGER\", responseManager);\n return {\n product: toLLMContent(responseManager.body, \"model\"),\n response: responseManager.response,\n };\n }\n}\nasync function invoke({ id, work, description, model, tools, summarize, chat, }) {\n // TODO: Make this a parameter.\n const retries = 5;\n const mode = computeWorkMode(tools, summarize);\n const responseManager = new StructuredResponse(id);\n const inputs = {\n body: {\n contents: work,\n systemInstruction: systemInstruction(responseManager.separator, description, mode, chat),\n safetySettings: defaultSafetySettings(),\n },\n };\n if (mode === \"call-tools\") {\n const product = await callTools(inputs, model, tools, retries);\n return { product, response: undefined };\n }\n else {\n const result = await generate(inputs, model, responseManager, retries);\n console.log(\"RESULT?\", result);\n if (\"$error\" in result) {\n return result;\n }\n if (chat) {\n await output({\n schema: {\n type: \"object\",\n properties: {\n product: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Draft\",\n },\n message: {\n type: \"string\",\n title: \"Requesting feedback\",\n format: \"markdown\",\n },\n },\n },\n message: responseManager.epilog,\n product: result.product,\n });\n }\n console.log(\"RESULT\", result);\n return result;\n }\n}\n/**\n * Returns the system instruction based on on the provided parameters.\n */\nfunction systemInstruction(separator, description, mode, chat) {\n const preamble = `Here is your job 
description:\n${toText(description)}\n\n`;\n const chatOrConclude = chat\n ? `Finally, ask the user to provide feedback on your output as a friendly assistant might.`\n : `Finally, you briefly summarize what the work product was and how it fulfills the task.`;\n const postamble = `\n\nToday is ${new Date()}`;\n const outputInstruction = `\nYour response must consist of three parts. The parts must be separated by the ${separator} tag.\n\nFirst, briefly introduce the work product (\"Okay, here is ... \") and why it fulfills the specified task, followed by a ${separator} tag to separate the parts.\nThen, you present the work product only, without any additional conversation or comments about your output, followed by a ${separator} tag to separate the parts.\n${chatOrConclude}\n`;\n switch (mode) {\n case \"summarize\":\n return toLLMContent(` \n${preamble}\nSummarize the research results to fulfill the specified task.\n${outputInstruction}\n${postamble}`);\n case \"call-tools\":\n return toLLMContent(`\n${preamble}\nGenerate multiple function calls to fulfill the specified task.\n${postamble}`);\n case \"generate\":\n return toLLMContent(`\n${preamble}\nProvide the response that fulfills the specified task.\n${outputInstruction}\n${postamble}`);\n }\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n work: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Work\",\n },\n description: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Job Description\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Work Product\",\n },\n },\n },\n };\n}\n", "metadata": { "title": "worker-worker", "source": { - "code": "/**\n * @fileoverview Performs assigned task. 
Part of the worker.\n */\nimport { toText, toLLMContent, ok, err } from \"./utils\";\nimport {\n type GeminiSchema,\n type GeminiInputs,\n type GeminiOutputs,\n type Tool,\n defaultSafetySettings,\n type GeminiAPIOutputs,\n} from \"./gemini\";\n\nimport { type ValidatoResult, callGemini } from \"./gemini-client\";\nimport invokeBoard from \"@invoke\";\nimport output from \"@output\";\n\nexport { invoke as default, describe };\n\ntype Inputs = {\n work: LLMContent[];\n description: LLMContent;\n model: string;\n tools: Tool[];\n summarize: boolean;\n chat: boolean;\n};\n\ntype Outputs = Outcome<{\n product: LLMContent;\n}>;\n\ntype WorkMode = \"generate\" | \"call-tools\" | \"summarize\";\n\nfunction computeWorkMode(tools: Tool[], summarize: boolean): WorkMode {\n if (tools.length > 0) {\n return \"call-tools\";\n }\n if (summarize) {\n return \"summarize\";\n }\n return \"generate\";\n}\n\nclass StructuredResponse {\n public readonly id: string;\n public prolog: string = \"\";\n public epilog: string = \"\";\n public body: string = \"\";\n\n constructor() {\n this.id = Math.random().toString(36).substring(2, 5);\n }\n\n get separator() {\n return ``;\n }\n\n parse(response: GeminiOutputs): ValidatoResult {\n const r = response as GeminiAPIOutputs;\n const part = r.candidates?.at(0)?.content?.parts?.at(0);\n if (!part || !(\"text\" in part)) {\n return { ok: false, error: \"No text in part\" };\n }\n const structure = part.text.split(this.separator);\n if (structure.length !== 3) {\n return { ok: false, error: \"The output must contain 3 parts\" };\n }\n this.prolog = structure[0];\n this.body = structure[1].trim();\n this.epilog = structure[2].trim();\n return { ok: true };\n }\n}\n\nasync function callTools(\n inputs: Omit,\n model: string,\n tools: Tool[],\n retries: number\n): Promise {\n inputs.body.tools = tools;\n inputs.body.toolConfig = {\n functionCallingConfig: {\n mode: \"ANY\",\n },\n };\n const response = await callGemini(\n inputs,\n model,\n (response) => {\n const r = response as GeminiAPIOutputs;\n if (r.candidates?.at(0)?.content) return { ok: true };\n return { ok: false, error: \"No content\" };\n },\n retries\n );\n if (!ok(response)) {\n return toLLMContent(\"TODO: Handle Gemini error response\");\n }\n const r = response as GeminiAPIOutputs;\n return r.candidates?.at(0)?.content || toLLMContent(\"No valid response\");\n}\n\nasync function generate(\n inputs: Omit,\n model: string,\n responseManager: StructuredResponse,\n retries: number\n): Promise {\n const response = await callGemini(\n inputs,\n model,\n (response) => {\n return responseManager.parse(response);\n },\n retries\n );\n if (!ok(response)) {\n return response;\n } else {\n return { product: toLLMContent(responseManager.body, \"model\") };\n }\n}\n\nasync function invoke({\n work,\n description,\n model,\n tools,\n summarize,\n chat,\n}: Inputs): Promise {\n // TODO: Make this a parameter.\n const retries = 5;\n const mode = computeWorkMode(tools, summarize);\n const responseManager = new StructuredResponse();\n const inputs: Omit = {\n body: {\n contents: work,\n systemInstruction: systemInstruction(\n responseManager.separator,\n description,\n mode,\n chat\n ),\n safetySettings: defaultSafetySettings(),\n },\n };\n if (mode === \"call-tools\") {\n const product = await callTools(inputs, model, tools, retries);\n return { product };\n } else {\n const result = await generate(inputs, model, responseManager, retries);\n if (\"$error\" in result) {\n return result;\n }\n if (chat) {\n await output({\n 
schema: {\n type: \"object\",\n properties: {\n product: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Draft\",\n },\n message: {\n type: \"string\",\n title: \"Requesting feedback\",\n format: \"markdown\",\n },\n },\n },\n message: responseManager.epilog,\n product: result.product,\n });\n }\n return result;\n }\n}\n\n/**\n * Returns the system instruction based on on the provided parameters.\n */\nfunction systemInstruction(\n separator: string,\n description: LLMContent,\n mode: WorkMode,\n chat: boolean\n): LLMContent {\n const preamble = `Here is your job description:\n${toText(description)}\n\n`;\n const chatOrConclude = chat\n ? `Finally, ask the user to provide feedback on your output as a friendly assistant might.`\n : `Finally, you briefly summarize what the work product was and how it fulfills the task.`;\n\n const postamble = `\n\nToday is ${new Date()}`;\n const outputInstruction = `\nYour response must consist of three parts.\nFirst, you briefly explain what the work product will be and why it fulfills the specified task.\nThen, you present the work product only, without any additional conversation or comments about your output.\n${chatOrConclude}\n\nThe parts must be separated by the ${separator} tag.\n`;\n\n switch (mode) {\n case \"summarize\":\n return toLLMContent(` \n${preamble}\nSummarize the research results to fulfill the specified task.\n${outputInstruction}\n${postamble}`);\n\n case \"call-tools\":\n return toLLMContent(`\n${preamble}\nGenerate multiple function calls to fulfill the specified task.\n${postamble}`);\n\n case \"generate\":\n return toLLMContent(`\n${preamble}\nProvide the response that fulfills the specified task.\n${outputInstruction}\n${postamble}`);\n }\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n work: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Work\",\n },\n description: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Job Description\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Work Product\",\n },\n },\n } satisfies Schema,\n };\n}\n", + "code": "/**\n * @fileoverview Performs assigned task. 
Part of the worker.\n */\nimport { toText, toLLMContent, ok, err } from \"./utils\";\nimport {\n type GeminiSchema,\n type GeminiInputs,\n type GeminiOutputs,\n type Tool,\n defaultSafetySettings,\n type GeminiAPIOutputs,\n} from \"./gemini\";\n\nimport { type ValidatoResult, callGemini } from \"./gemini-client\";\nimport invokeBoard from \"@invoke\";\nimport output from \"@output\";\n\nexport { invoke as default, describe };\n\ntype Inputs = {\n id: string;\n work: LLMContent[];\n description: LLMContent;\n model: string;\n tools: Tool[];\n summarize: boolean;\n chat: boolean;\n};\n\ntype Outputs = Outcome<{\n product: LLMContent;\n response?: LLMContent;\n}>;\n\ntype WorkMode = \"generate\" | \"call-tools\" | \"summarize\";\n\nfunction computeWorkMode(tools: Tool[], summarize: boolean): WorkMode {\n if (tools.length > 0) {\n return \"call-tools\";\n }\n if (summarize) {\n return \"summarize\";\n }\n return \"generate\";\n}\n\nclass StructuredResponse {\n public prolog: string = \"\";\n public epilog: string = \"\";\n public body: string = \"\";\n response: LLMContent | undefined = undefined;\n\n constructor(public readonly id: string) {}\n\n get separator() {\n return ``;\n }\n\n parse(response: GeminiOutputs): ValidatoResult {\n const r = response as GeminiAPIOutputs;\n const part = r.candidates?.at(0)?.content?.parts?.at(0);\n if (!part || !(\"text\" in part)) {\n return { ok: false, error: \"No text in part\" };\n }\n this.response = r.candidates.at(0)!.content!;\n const structure = part.text.split(this.separator);\n if (structure.length !== 3) {\n return { ok: false, error: \"The output must contain 3 parts\" };\n }\n this.prolog = structure[0];\n this.body = structure[1].trim();\n this.epilog = structure[2].trim();\n return { ok: true };\n }\n}\n\nasync function callTools(\n inputs: Omit,\n model: string,\n tools: Tool[],\n retries: number\n): Promise {\n inputs.body.tools = tools;\n inputs.body.toolConfig = {\n functionCallingConfig: {\n mode: \"ANY\",\n },\n };\n const response = await callGemini(\n inputs,\n model,\n (response) => {\n const r = response as GeminiAPIOutputs;\n if (r.candidates?.at(0)?.content) return { ok: true };\n return { ok: false, error: \"No content\" };\n },\n retries\n );\n if (!ok(response)) {\n return toLLMContent(\"TODO: Handle Gemini error response\");\n }\n const r = response as GeminiAPIOutputs;\n return r.candidates?.at(0)?.content || toLLMContent(\"No valid response\");\n}\n\nasync function generate(\n inputs: Omit,\n model: string,\n responseManager: StructuredResponse,\n retries: number\n): Promise {\n const response = await callGemini(\n inputs,\n model,\n (response) => {\n return responseManager.parse(response);\n },\n retries\n );\n if (!ok(response)) {\n return response;\n } else {\n console.log(\"RESPONSE MANAGER\", responseManager);\n return {\n product: toLLMContent(responseManager.body, \"model\"),\n response: responseManager.response!,\n };\n }\n}\n\nasync function invoke({\n id,\n work,\n description,\n model,\n tools,\n summarize,\n chat,\n}: Inputs): Promise {\n // TODO: Make this a parameter.\n const retries = 5;\n const mode = computeWorkMode(tools, summarize);\n const responseManager = new StructuredResponse(id);\n const inputs: Omit = {\n body: {\n contents: work,\n systemInstruction: systemInstruction(\n responseManager.separator,\n description,\n mode,\n chat\n ),\n safetySettings: defaultSafetySettings(),\n },\n };\n if (mode === \"call-tools\") {\n const product = await callTools(inputs, model, tools, retries);\n return { 
product, response: undefined };\n } else {\n const result = await generate(inputs, model, responseManager, retries);\n console.log(\"RESULT?\", result);\n if (\"$error\" in result) {\n return result;\n }\n if (chat) {\n await output({\n schema: {\n type: \"object\",\n properties: {\n product: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Draft\",\n },\n message: {\n type: \"string\",\n title: \"Requesting feedback\",\n format: \"markdown\",\n },\n },\n },\n message: responseManager.epilog,\n product: result.product,\n });\n }\n console.log(\"RESULT\", result);\n return result;\n }\n}\n\n/**\n * Returns the system instruction based on on the provided parameters.\n */\nfunction systemInstruction(\n separator: string,\n description: LLMContent,\n mode: WorkMode,\n chat: boolean\n): LLMContent {\n const preamble = `Here is your job description:\n${toText(description)}\n\n`;\n const chatOrConclude = chat\n ? `Finally, ask the user to provide feedback on your output as a friendly assistant might.`\n : `Finally, you briefly summarize what the work product was and how it fulfills the task.`;\n\n const postamble = `\n\nToday is ${new Date()}`;\n const outputInstruction = `\nYour response must consist of three parts. The parts must be separated by the ${separator} tag.\n\nFirst, briefly introduce the work product (\"Okay, here is ... \") and why it fulfills the specified task, followed by a ${separator} tag to separate the parts.\nThen, you present the work product only, without any additional conversation or comments about your output, followed by a ${separator} tag to separate the parts.\n${chatOrConclude}\n`;\n\n switch (mode) {\n case \"summarize\":\n return toLLMContent(` \n${preamble}\nSummarize the research results to fulfill the specified task.\n${outputInstruction}\n${postamble}`);\n\n case \"call-tools\":\n return toLLMContent(`\n${preamble}\nGenerate multiple function calls to fulfill the specified task.\n${postamble}`);\n\n case \"generate\":\n return toLLMContent(`\n${preamble}\nProvide the response that fulfills the specified task.\n${outputInstruction}\n${postamble}`);\n }\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n work: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Work\",\n },\n description: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Job Description\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n behavior: [\"llm-content\"],\n title: \"Work Product\",\n },\n },\n } satisfies Schema,\n };\n}\n", "language": "typescript" }, "description": "Performs assigned task. Part of the worker.", @@ -135,11 +135,11 @@ } }, "gemini-client": { - "code": "import {} from \"./gemini\";\nimport { ok, err } from \"./utils\";\nimport invokeBoard from \"@invoke\";\nexport { callGemini };\nasync function callGemini(inputs, model, validator, retries) {\n // TODO: Add more nuanced logic around retries\n for (let i = 0; i < retries; ++i) {\n const response = (await invokeBoard({\n $board: model,\n $metadata: {\n title: \"Model\",\n description: `Invoking the model`,\n },\n ...inputs,\n }));\n if (!ok(response)) {\n const nextStep = i == retries ? 
\"bailing\" : \"will retry\";\n console.error(`Error from model, ${nextStep}`, response.$error);\n }\n else if (!validator(response)) {\n console.error(\"Invalid response\");\n }\n else {\n return response;\n }\n }\n return err(`Failed to get valid response after ${retries} tries`);\n}\n", + "code": "import invokeGemini, {} from \"./gemini\";\nimport { ok, err } from \"./utils\";\nexport { callGemini };\nasync function callGemini(inputs, model, validator, retries) {\n // TODO: Add more nuanced logic around retries\n for (let i = 0; i < retries; ++i) {\n const response = await invokeGemini(inputs);\n if (!ok(response)) {\n const nextStep = i == retries ? \"bailing\" : \"will retry\";\n console.error(`Error from model, ${nextStep}`, response.$error);\n }\n else if (!validator(response)) {\n console.error(\"Invalid response\");\n }\n else {\n return response;\n }\n }\n return err(`Failed to get valid response after ${retries} tries`);\n}\n", "metadata": { "title": "gemini-client", "source": { - "code": "import { type GeminiInputs, type GeminiOutputs } from \"./gemini\";\nimport { ok, err } from \"./utils\";\nimport invokeBoard from \"@invoke\";\n\nexport type ValidatoResult = { ok: true } | { ok: false; error: string };\nexport type ValidatorFunction = (response: GeminiOutputs) => ValidatoResult;\n\nexport { callGemini };\n\nasync function callGemini(\n inputs: Omit,\n model: string,\n validator: ValidatorFunction,\n retries: number\n): Promise> {\n // TODO: Add more nuanced logic around retries\n for (let i = 0; i < retries; ++i) {\n const response = (await invokeBoard({\n $board: model,\n $metadata: {\n title: \"Model\",\n description: `Invoking the model`,\n },\n ...inputs,\n })) as Outcome;\n if (!ok(response)) {\n const nextStep = i == retries ? \"bailing\" : \"will retry\";\n console.error(`Error from model, ${nextStep}`, response.$error);\n } else if (!validator(response)) {\n console.error(\"Invalid response\");\n } else {\n return response;\n }\n }\n return err(`Failed to get valid response after ${retries} tries`);\n}\n", + "code": "import invokeGemini, { type GeminiInputs, type GeminiOutputs } from \"./gemini\";\nimport { ok, err } from \"./utils\";\n\nexport type ValidatoResult = { ok: true } | { ok: false; error: string };\nexport type ValidatorFunction = (response: GeminiOutputs) => ValidatoResult;\n\nexport { callGemini };\n\nasync function callGemini(\n inputs: Omit,\n model: string,\n validator: ValidatorFunction,\n retries: number\n): Promise> {\n // TODO: Add more nuanced logic around retries\n for (let i = 0; i < retries; ++i) {\n const response = await invokeGemini(inputs);\n if (!ok(response)) {\n const nextStep = i == retries ? 
\"bailing\" : \"will retry\";\n console.error(`Error from model, ${nextStep}`, response.$error);\n } else if (!validator(response)) {\n console.error(\"Invalid response\");\n } else {\n return response;\n }\n }\n return err(`Failed to get valid response after ${retries} tries`);\n}\n", "language": "typescript" }, "description": "", @@ -147,27 +147,54 @@ } }, "agent-main": { - "code": "/**\n * @fileoverview The main body of the agent\n */\nimport output from \"@output\";\nimport {} from \"./common\";\nimport { ToolManager } from \"./tool-manager\";\nimport workerIntroducer from \"./worker-introducer\";\nimport workerWorker from \"./worker-worker\";\nimport { report } from \"./output\";\nimport { defaultLLMContent } from \"./utils\";\nimport invokeGraph from \"@invoke\";\nexport { invoke as default, describe };\nfunction toLLMContent(text, role = \"user\") {\n return { parts: [{ text }], role };\n}\nasync function invoke({ context }) {\n let { description, type, context: initialContext, model, defaultModel, tools, chat, work: workContext, } = context;\n if (!description) {\n const $error = \"No Job description supplied\";\n await report({\n actor: \"Agent\",\n name: $error,\n category: \"Runtime error\",\n details: `In order to run, Agent needs to have a job description. Please let it know what it's good at.`,\n });\n return { $error };\n }\n if (!model) {\n model = defaultModel;\n }\n if (!model) {\n const $error = \"Model was not supplied\";\n await report({\n actor: \"Agent\",\n name: $error,\n category: \"Runtime error\",\n details: `In order to run, Agent neeeds a model to be connected to it. \nPlease drag one (\"Gemini\" should work) from the list of components over to the \"Model\"\nport`,\n });\n console.error(\"MAIN ERROR\", $error);\n return { $error };\n }\n const toolManager = new ToolManager();\n if (!(await toolManager.initialize(tools))) {\n const $error = `Problem initializing tools. 
\nThe following errors were encountered: ${toolManager.errors.join(\",\")}`;\n console.error(\"MAIN ERROR\", $error, toolManager.errors);\n return { $error };\n }\n switch (type) {\n case \"introduction\": {\n const response = await workerIntroducer({\n description,\n model,\n tools: toolManager.list(),\n });\n if (response.$error) {\n console.error(\"INTRODUCER ERROR\", response.$error);\n return {\n $error: response.$error,\n };\n }\n return {\n done: [response.introduction || toLLMContent(\"No valid response\")],\n };\n }\n case \"work\": {\n const work = [...initialContext, ...workContext];\n const userInputs = context.userInputs;\n const response = await workerWorker({\n description,\n work,\n model,\n tools: toolManager.list(),\n summarize: false,\n chat,\n });\n if (\"$error\" in response) {\n console.error(\"ERROR FROM WORKER\", response.$error);\n return {\n $error: response.$error,\n };\n }\n const workerResponse = response.product;\n const toolResults = [];\n await toolManager.processResponse(workerResponse, async ($board, args) => {\n const result = await invokeGraph({\n $board,\n ...args,\n });\n toolResults.push(result);\n });\n if (toolResults.length > 0) {\n const summary = await workerWorker({\n description,\n work: [\n ...toolResults.map((toolResult) => toLLMContent(JSON.stringify(toolResult))),\n ],\n model,\n tools: [],\n summarize: true,\n chat: false,\n });\n if (\"$error\" in summary) {\n console.error(\"ERROR FROM SUMMARY\", summary.$error);\n return {\n $error: summary.$error,\n };\n }\n const summaryResponse = summary.product;\n return { done: [summaryResponse] };\n }\n else if (chat && context.userInputs.length == 0) {\n const toInput = {\n type: \"object\",\n properties: {\n request: {\n type: \"object\",\n title: \"Please provide feedback\",\n behavior: [\"transient\", \"llm-content\"],\n examples: [defaultLLMContent()],\n },\n },\n };\n return {\n toInput,\n context: { ...context, work: [...workContext, workerResponse] },\n };\n }\n return { done: [workerResponse] };\n }\n default:\n return {\n done: [\n { parts: [{ text: \"Unknown task type\" }] },\n ],\n };\n }\n // TODO: Bring back once the rest is ported in.\n // const userInputs = context.userInputs;\n // if (!context.chat || userInputs.length > 2) {\n // return {\n // done: context.userInputs.map((item) => toLLMContent(item as string)),\n // };\n // }\n // console.log(\"INPUTS\", context);\n // await output({\n // schema: {\n // type: \"object\",\n // properties: {\n // message: {\n // type: \"string\",\n // format: \"markdown\",\n // },\n // },\n // },\n // message: \"**HELLO** THERE\",\n // });\n // const toInput: Schema = {\n // type: \"object\",\n // properties: {\n // request: {\n // type: \"string\",\n // title: \"Request\",\n // description: \"Answer me this\",\n // behavior: [\"transient\"],\n // },\n // },\n // };\n // return { toInput, context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n toInput: {\n type: \"object\",\n title: \"Input Schema\",\n },\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n done: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Done\",\n },\n },\n },\n };\n}\n", + "code": "/**\n * @fileoverview The main body of the agent\n */\nimport output from \"@output\";\nimport {} from \"./common\";\nimport { ToolManager } from 
\"./tool-manager\";\nimport workerIntroducer from \"./worker-introducer\";\nimport workerWorker from \"./worker-worker\";\nimport { report } from \"./output\";\nimport { defaultLLMContent } from \"./utils\";\nimport invokeGraph from \"@invoke\";\nexport { invoke as default, describe };\nfunction toLLMContent(text, role = \"user\") {\n return { parts: [{ text }], role };\n}\nasync function invoke({ context }) {\n let { id, description, type, context: initialContext, model, defaultModel, tools, chat, work: workContext, } = context;\n if (!description) {\n const $error = \"No Job description supplied\";\n await report({\n actor: \"Agent\",\n name: $error,\n category: \"Runtime error\",\n details: `In order to run, Agent needs to have a job description. Please let it know what it's good at.`,\n });\n return { $error };\n }\n // For now, use the internal Gemini module to invoke\n // TODO: Add back the capability to invoke model boards\n // if (!model) {\n // model = defaultModel;\n // }\n // if (!model) {\n // const $error = \"Model was not supplied\";\n // await report({\n // actor: \"Agent\",\n // name: $error,\n // category: \"Runtime error\",\n // details: `In order to run, Agent neeeds a model to be connected to it.\n // Please drag one (\"Gemini\" should work) from the list of components over to the \"Model\"\n // port`,\n // });\n // return { $error };\n // }\n const toolManager = new ToolManager();\n if (!(await toolManager.initialize(tools))) {\n const $error = `Problem initializing tools. \nThe following errors were encountered: ${toolManager.errors.join(\",\")}`;\n console.error(\"MAIN ERROR\", $error, toolManager.errors);\n return { $error };\n }\n switch (type) {\n case \"introduction\": {\n const response = await workerIntroducer({\n description,\n model,\n tools: toolManager.list(),\n });\n if (response.$error) {\n console.error(\"INTRODUCER ERROR\", response.$error);\n return {\n $error: response.$error,\n };\n }\n return {\n done: [response.introduction || toLLMContent(\"No valid response\")],\n };\n }\n case \"work\": {\n const work = [...initialContext, ...workContext];\n const userInputs = context.userInputs;\n const response = await workerWorker({\n id,\n description,\n work,\n model,\n tools: toolManager.list(),\n summarize: false,\n chat,\n });\n if (\"$error\" in response) {\n console.error(\"ERROR FROM WORKER\", response.$error);\n return {\n $error: response.$error,\n };\n }\n const workerResponse = response.product;\n const toolResults = [];\n await toolManager.processResponse(workerResponse, async ($board, args) => {\n const result = await invokeGraph({\n $board,\n ...args,\n });\n toolResults.push(result);\n });\n if (toolResults.length > 0) {\n const summary = await workerWorker({\n id,\n description,\n work: [\n ...toolResults.map((toolResult) => toLLMContent(JSON.stringify(toolResult))),\n ],\n model,\n tools: [],\n summarize: true,\n chat: false,\n });\n if (\"$error\" in summary) {\n console.error(\"ERROR FROM SUMMARY\", summary.$error);\n return {\n $error: summary.$error,\n };\n }\n const summaryResponse = summary.product;\n return { done: [summaryResponse] };\n }\n else if (chat && context.userInputs.length == 0) {\n const toInput = {\n type: \"object\",\n properties: {\n request: {\n type: \"object\",\n title: \"Please provide feedback\",\n behavior: [\"transient\", \"llm-content\"],\n examples: [defaultLLMContent()],\n },\n },\n };\n return {\n toInput,\n context: { ...context, work: [...workContext, response.response] },\n };\n }\n return { done: 
[workerResponse] };\n }\n default:\n return {\n done: [\n { parts: [{ text: \"Unknown task type\" }] },\n ],\n };\n }\n const userInputs = context.userInputs;\n if (!context.chat || userInputs.length > 2) {\n return {\n done: context.userInputs.map((item) => toLLMContent(item)),\n };\n }\n console.log(\"INPUTS\", context);\n await output({\n schema: {\n type: \"object\",\n properties: {\n message: {\n type: \"string\",\n format: \"markdown\",\n },\n },\n },\n message: \"**HELLO** THERE\",\n });\n const toInput = {\n type: \"object\",\n properties: {\n request: {\n type: \"string\",\n title: \"Request\",\n description: \"Answer me this\",\n behavior: [\"transient\"],\n },\n },\n };\n return { toInput, context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n toInput: {\n type: \"object\",\n title: \"Input Schema\",\n },\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n done: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Done\",\n },\n },\n },\n };\n}\n", "metadata": { "title": "agent-main", "source": { - "code": "/**\n * @fileoverview The main body of the agent\n */\nimport output from \"@output\";\nimport { type AgentContext } from \"./common\";\nimport { ToolManager } from \"./tool-manager\";\nimport workerIntroducer from \"./worker-introducer\";\nimport workerWorker from \"./worker-worker\";\nimport { report } from \"./output\";\nimport { defaultLLMContent } from \"./utils\";\nimport invokeGraph from \"@invoke\";\n\nexport { invoke as default, describe };\n\ntype Inputs = {\n context: AgentContext;\n};\n\ntype Outputs = {\n $error?: string;\n context?: AgentContext;\n toInput?: Schema;\n done?: LLMContent[];\n};\n\nfunction toLLMContent(\n text: string,\n role: LLMContent[\"role\"] = \"user\"\n): LLMContent {\n return { parts: [{ text }], role };\n}\n\nasync function invoke({ context }: Inputs): Promise {\n let {\n description,\n type,\n context: initialContext,\n model,\n defaultModel,\n tools,\n chat,\n work: workContext,\n } = context;\n if (!description) {\n const $error = \"No Job description supplied\";\n await report({\n actor: \"Agent\",\n name: $error,\n category: \"Runtime error\",\n details: `In order to run, Agent needs to have a job description. Please let it know what it's good at.`,\n });\n return { $error };\n }\n if (!model) {\n model = defaultModel;\n }\n if (!model) {\n const $error = \"Model was not supplied\";\n\n await report({\n actor: \"Agent\",\n name: $error,\n category: \"Runtime error\",\n details: `In order to run, Agent neeeds a model to be connected to it. \nPlease drag one (\"Gemini\" should work) from the list of components over to the \"Model\"\nport`,\n });\n console.error(\"MAIN ERROR\", $error);\n return { $error };\n }\n const toolManager = new ToolManager();\n if (!(await toolManager.initialize(tools))) {\n const $error = `Problem initializing tools. 
\nThe following errors were encountered: ${toolManager.errors.join(\",\")}`;\n console.error(\"MAIN ERROR\", $error, toolManager.errors);\n return { $error };\n }\n\n switch (type) {\n case \"introduction\": {\n const response = await workerIntroducer({\n description,\n model,\n tools: toolManager.list(),\n });\n if (response.$error) {\n console.error(\"INTRODUCER ERROR\", response.$error);\n return {\n $error: response.$error,\n };\n }\n return {\n done: [response.introduction || toLLMContent(\"No valid response\")],\n };\n }\n case \"work\": {\n const work = [...initialContext, ...workContext];\n const userInputs = context.userInputs as LLMContent[];\n const response = await workerWorker({\n description,\n work,\n model,\n tools: toolManager.list(),\n summarize: false,\n chat,\n });\n if (\"$error\" in response) {\n console.error(\"ERROR FROM WORKER\", response.$error);\n return {\n $error: response.$error,\n };\n }\n const workerResponse = response.product;\n const toolResults: object[] = [];\n await toolManager.processResponse(\n workerResponse,\n async ($board, args) => {\n const result = await invokeGraph({\n $board,\n ...args,\n });\n toolResults.push(result);\n }\n );\n if (toolResults.length > 0) {\n const summary = await workerWorker({\n description,\n work: [\n ...toolResults.map((toolResult) =>\n toLLMContent(JSON.stringify(toolResult))\n ),\n ],\n model,\n tools: [],\n summarize: true,\n chat: false,\n });\n if (\"$error\" in summary) {\n console.error(\"ERROR FROM SUMMARY\", summary.$error);\n return {\n $error: summary.$error,\n };\n }\n const summaryResponse = summary.product;\n return { done: [summaryResponse] };\n } else if (chat && context.userInputs.length == 0) {\n const toInput: Schema = {\n type: \"object\",\n properties: {\n request: {\n type: \"object\",\n title: \"Please provide feedback\",\n behavior: [\"transient\", \"llm-content\"],\n examples: [defaultLLMContent()],\n },\n },\n };\n return {\n toInput,\n context: { ...context, work: [...workContext, workerResponse] },\n };\n }\n return { done: [workerResponse] };\n }\n default:\n return {\n done: [\n { parts: [{ text: \"Unknown task type\" }] },\n ] satisfies LLMContent[],\n };\n }\n // TODO: Bring back once the rest is ported in.\n // const userInputs = context.userInputs;\n // if (!context.chat || userInputs.length > 2) {\n // return {\n // done: context.userInputs.map((item) => toLLMContent(item as string)),\n // };\n // }\n // console.log(\"INPUTS\", context);\n // await output({\n // schema: {\n // type: \"object\",\n // properties: {\n // message: {\n // type: \"string\",\n // format: \"markdown\",\n // },\n // },\n // },\n // message: \"**HELLO** THERE\",\n // });\n // const toInput: Schema = {\n // type: \"object\",\n // properties: {\n // request: {\n // type: \"string\",\n // title: \"Request\",\n // description: \"Answer me this\",\n // behavior: [\"transient\"],\n // },\n // },\n // };\n // return { toInput, context };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n toInput: {\n type: \"object\",\n title: \"Input Schema\",\n },\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n done: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Done\",\n },\n },\n } satisfies Schema,\n };\n}\n", + "code": "/**\n * @fileoverview The main body of the agent\n 
*/\nimport output from \"@output\";\nimport { type AgentContext } from \"./common\";\nimport { ToolManager } from \"./tool-manager\";\nimport workerIntroducer from \"./worker-introducer\";\nimport workerWorker from \"./worker-worker\";\nimport { report } from \"./output\";\nimport { defaultLLMContent } from \"./utils\";\nimport invokeGraph from \"@invoke\";\n\nexport { invoke as default, describe };\n\ntype Inputs = {\n context: AgentContext;\n};\n\ntype Outputs = {\n $error?: string;\n context?: AgentContext;\n toInput?: Schema;\n done?: LLMContent[];\n};\n\nfunction toLLMContent(\n text: string,\n role: LLMContent[\"role\"] = \"user\"\n): LLMContent {\n return { parts: [{ text }], role };\n}\n\nasync function invoke({ context }: Inputs): Promise<Outputs> {\n let {\n id,\n description,\n type,\n context: initialContext,\n model,\n defaultModel,\n tools,\n chat,\n work: workContext,\n } = context;\n if (!description) {\n const $error = \"No job description supplied\";\n await report({\n actor: \"Agent\",\n name: $error,\n category: \"Runtime error\",\n details: `In order to run, Agent needs to have a job description. Please let it know what it's good at.`,\n });\n return { $error };\n }\n // For now, use the internal Gemini module to invoke\n // TODO: Add back the capability to invoke model boards\n // if (!model) {\n // model = defaultModel;\n // }\n // if (!model) {\n // const $error = \"Model was not supplied\";\n\n // await report({\n // actor: \"Agent\",\n // name: $error,\n // category: \"Runtime error\",\n // details: `In order to run, Agent needs a model to be connected to it.\n // Please drag one (\"Gemini\" should work) from the list of components over to the \"Model\"\n // port`,\n // });\n // return { $error };\n // }\n const toolManager = new ToolManager();\n if (!(await toolManager.initialize(tools))) {\n const $error = `Problem initializing tools. 
\nThe following errors were encountered: ${toolManager.errors.join(\",\")}`;\n console.error(\"MAIN ERROR\", $error, toolManager.errors);\n return { $error };\n }\n\n switch (type) {\n case \"introduction\": {\n const response = await workerIntroducer({\n description,\n model,\n tools: toolManager.list(),\n });\n if (response.$error) {\n console.error(\"INTRODUCER ERROR\", response.$error);\n return {\n $error: response.$error,\n };\n }\n return {\n done: [response.introduction || toLLMContent(\"No valid response\")],\n };\n }\n case \"work\": {\n const work = [...initialContext, ...workContext];\n const userInputs = context.userInputs as LLMContent[];\n const response = await workerWorker({\n id,\n description,\n work,\n model,\n tools: toolManager.list(),\n summarize: false,\n chat,\n });\n if (\"$error\" in response) {\n console.error(\"ERROR FROM WORKER\", response.$error);\n return {\n $error: response.$error,\n };\n }\n const workerResponse = response.product;\n const toolResults: object[] = [];\n await toolManager.processResponse(\n workerResponse,\n async ($board, args) => {\n const result = await invokeGraph({\n $board,\n ...args,\n });\n toolResults.push(result);\n }\n );\n if (toolResults.length > 0) {\n const summary = await workerWorker({\n id,\n description,\n work: [\n ...toolResults.map((toolResult) =>\n toLLMContent(JSON.stringify(toolResult))\n ),\n ],\n model,\n tools: [],\n summarize: true,\n chat: false,\n });\n if (\"$error\" in summary) {\n console.error(\"ERROR FROM SUMMARY\", summary.$error);\n return {\n $error: summary.$error,\n };\n }\n const summaryResponse = summary.product;\n return { done: [summaryResponse] };\n } else if (chat && context.userInputs.length == 0) {\n const toInput: Schema = {\n type: \"object\",\n properties: {\n request: {\n type: \"object\",\n title: \"Please provide feedback\",\n behavior: [\"transient\", \"llm-content\"],\n examples: [defaultLLMContent()],\n },\n },\n };\n return {\n toInput,\n context: { ...context, work: [...workContext, response.response!] 
},\n };\n }\n return { done: [workerResponse] };\n }\n default:\n return {\n done: [\n { parts: [{ text: \"Unknown task type\" }] },\n ] satisfies LLMContent[],\n };\n }\n\n const userInputs = context.userInputs;\n if (!context.chat || userInputs.length > 2) {\n return {\n done: context.userInputs.map((item) => toLLMContent(item as string)),\n };\n }\n console.log(\"INPUTS\", context);\n await output({\n schema: {\n type: \"object\",\n properties: {\n message: {\n type: \"string\",\n format: \"markdown\",\n },\n },\n },\n message: \"**HELLO** THERE\",\n });\n const toInput: Schema = {\n type: \"object\",\n properties: {\n request: {\n type: \"string\",\n title: \"Request\",\n description: \"Answer me this\",\n behavior: [\"transient\"],\n },\n },\n };\n return { toInput, context };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n toInput: {\n type: \"object\",\n title: \"Input Schema\",\n },\n context: {\n type: \"object\",\n title: \"Agent Context\",\n },\n done: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Done\",\n },\n },\n } satisfies Schema,\n };\n}\n", "language": "typescript" }, "description": "The main body of the agent", "runnable": true } + }, + "researcher": { + "code": "/**\n * @fileoverview Scours the Internet according to your plan.\n */\nexport { invoke as default, describe };\nasync function invoke({ context }) {\n return { context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description: \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n", + "metadata": { + "title": "Researcher", + "source": { + "code": "/**\n * @fileoverview Scours the Internet according to your plan.\n */\n\nexport { invoke as default, describe };\n\nasync function invoke({ context }: { context: LLMContent[] }) {\n return { context };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description:\n \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", + "language": "typescript" + }, + "description": "Scours the Internet according to your plan.", + "runnable": true + } + }, + "image-generator": { + "code": "/**\n * @fileoverview Generates an image using supplied context.\n */\nimport gemini, {} from \"./gemini\";\nimport { err, ok } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, }) {\n 
const contents = context && Array.isArray(context) && context.length > 0\n ? [context.at(-1)]\n : undefined;\n if (!contents) {\n return err(\"Must supply context as input\");\n }\n const result = await gemini({\n model: \"gemini-2.0-flash-exp\",\n body: {\n contents,\n generationConfig: {\n responseModalities: [\"TEXT\", \"IMAGE\"],\n },\n },\n });\n if (!ok(result)) {\n return result;\n }\n if (\"context\" in result) {\n return err(\"Invalid output from Gemini -- must be candidates\");\n }\n const content = result.candidates.at(0)?.content;\n if (!content) {\n return err(\"No content\");\n }\n return { context: [content] };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n",
+ "metadata": {
+ "title": "Image Generator",
+ "source": {
+ "code": "/**\n * @fileoverview Generates an image using supplied context.\n */\n\nimport gemini, { type GeminiOutputs, type GeminiInputs } from \"./gemini\";\nimport { err, ok } from \"./utils\";\n\nexport { invoke as default, describe };\n\nasync function invoke({\n context,\n}: {\n context: LLMContent[];\n}): Promise<Outcome<{ context: LLMContent[] }>> {\n const contents =\n context && Array.isArray(context) && context.length > 0\n ? [context.at(-1)!]\n : undefined;\n if (!contents) {\n return err(\"Must supply context as input\");\n }\n const result = await gemini({\n model: \"gemini-2.0-flash-exp\",\n body: {\n contents,\n generationConfig: {\n responseModalities: [\"TEXT\", \"IMAGE\"],\n },\n },\n });\n if (!ok(result)) {\n return result;\n }\n if (\"context\" in result) {\n return err(\"Invalid output from Gemini -- must be candidates\");\n }\n\n const content = result.candidates.at(0)?.content;\n if (!content) {\n return err(\"No content\");\n }\n\n return { context: [content] };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n",
+ "language": "typescript"
+ },
+ "description": "Generates an image using supplied context.",
+ "runnable": true
+ }
}
},
"exports": [
"#module:gemini",
- "#daf082ca-c1aa-4aff-b2c8-abeb984ab66c"
+ "#daf082ca-c1aa-4aff-b2c8-abeb984ab66c",
+ "#module:researcher",
+ "#module:image-generator"
],
"graphs": {
"daf082ca-c1aa-4aff-b2c8-abeb984ab66c": {
- "title": "Agent",
- "description": "A blank board. Use it as a starting point for your creations.",
+ "title": "Text Generator",
+ "description": "Calls an LLM and so much more. 
Give it a job description, connect tools for it to use, and optionally chat with the user to refine the results.",
"version": "0.0.1",
+ "describer": "module:entry",
"nodes": [
{
"type": "output",
@@ -193,8 +220,8 @@
},
"metadata": {
"visual": {
- "x": 337.9921875,
- "y": 4.67578125,
+ "x": 708,
+ "y": 44,
"collapsed": "expanded"
}
}
@@ -204,19 +231,94 @@
"type": "#module:entry",
"metadata": {
"visual": {
- "x": -82.0078125,
- "y": -253.32421875,
+ "x": -47,
+ "y": -72,
"collapsed": "expanded"
},
"title": "entry"
}
+ },
+ {
+ "id": "board-d340ad8f",
+ "type": "#module:agent-main",
+ "metadata": {
+ "visual": {
+ "x": 332,
+ "y": -6,
+ "collapsed": "expanded"
+ },
+ "title": "agent-main"
+ }
+ },
+ {
+ "id": "board-1946064a",
+ "type": "#module:join",
+ "metadata": {
+ "visual": {
+ "x": 953,
+ "y": -248,
+ "collapsed": "expanded"
+ },
+ "title": "join"
+ }
+ },
+ {
+ "type": "input",
+ "id": "input",
+ "metadata": {
+ "visual": {
+ "x": 713,
+ "y": 170,
+ "collapsed": "advanced"
+ },
+ "title": "input"
+ }
+ }
+ ],
+ "edges": [
+ {
+ "from": "board-f138aa03",
+ "to": "board-d340ad8f",
+ "out": "context",
+ "in": "context"
+ },
+ {
+ "from": "board-d340ad8f",
+ "to": "output",
+ "out": "done",
+ "in": "context"
+ },
+ {
+ "from": "input",
+ "to": "board-1946064a",
+ "out": "request",
+ "in": "request"
+ },
+ {
+ "from": "board-d340ad8f",
+ "to": "input",
+ "out": "toInput",
+ "in": "schema"
+ },
+ {
+ "from": "board-d340ad8f",
+ "to": "board-1946064a",
+ "out": "context",
+ "in": "context"
+ },
+ {
+ "from": "board-1946064a",
+ "to": "board-d340ad8f",
+ "out": "context",
+ "in": "context"
+ }
],
- "edges": [],
"metadata": {
"visual": {
"minimized": false
- }
+ },
+ "describer": "module:entry",
+ "tags": []
}
}
}
diff --git a/packages/a2/bgl/agent-workbench.bgl.json b/packages/a2/bgl/agent-workbench.bgl.json
new file mode 100644
index 0000000000..7860408ff0
--- /dev/null
+++ b/packages/a2/bgl/agent-workbench.bgl.json
@@ -0,0 +1,209 @@
+{
+ "title": "A2 Agent Workbench",
+ "description": "A blank board. Use it as a starting point for your creations.",
+ "version": "0.0.1",
+ "nodes": [
+ {
+ "id": "content-10fef410",
+ "type": "content",
+ "metadata": {
+ "visual": {
+ "x": -594,
+ "y": -238,
+ "collapsed": "expanded"
+ },
+ "title": "Content",
+ "logLevel": "debug"
+ },
+ "configuration": {
+ "role": "user",
+ "template": {
+ "role": "user",
+ "parts": [
+ {
+ "text": "The pros and cons of using AIs to generate blog posts"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "id": "a2-7e19872d",
+ "type": "file://bgl/a2.bgl.json#daf082ca-c1aa-4aff-b2c8-abeb984ab66c",
+ "metadata": {
+ "visual": {
+ "x": 177,
+ "y": -246,
+ "collapsed": "expanded"
+ },
+ "title": "Outline Writer",
+ "logLevel": "debug"
+ },
+ "configuration": {
+ "description": {
+ "parts": [
+ {
+ "text": "You are a blog post outline writer. Your job is to take the topic provided and come up with an outline for a blog post. \n\nThe outline will weave together the following details:\n- The basic overview of the topic\n- Historical perspective, if applicable\n- Current opinions on the topic, if applicable\n- Any controversies that might surround the topic\n- Any future developments around the topic"
+ }
+ ],
+ "role": "user"
+ }
+ }
+ },
+ {
+ "id": "a2-cb7ec1eb",
+ "type": "file://bgl/a2.bgl.json#module:researcher",
+ "metadata": {
+ "visual": {
+ "x": -202.99999999999994,
+ "y": -232.00000000000006,
+ "collapsed": "expanded"
+ },
+ "title": "Researcher",
+ "logLevel": "debug"
+ },
+ "configuration": {
+ "plan": {
+ "role": "user",
+ "parts": [
+ {
+ "text": "Research the topic for a blog post. 
Well-done research should include:\n\n- The basic overview of the topic\n- Historical perspective, if applicable\n- Current opinions on the topic, if applicable\n- Any controversies that might surround the topic\n- Any future developments around the topic"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "id": "a2-25320551",
+ "type": "file://bgl/a2.bgl.json#daf082ca-c1aa-4aff-b2c8-abeb984ab66c",
+ "metadata": {
+ "visual": {
+ "x": 711,
+ "y": -49,
+ "collapsed": "expanded"
+ },
+ "title": "Post Writer",
+ "logLevel": "debug"
+ },
+ "configuration": {
+ "description": {
+ "parts": [
+ {
+ "text": "You are a blog post writer. Your job is to take the provided outline and all of the raw research collected so far in the conversation context and produce a blog post that incorporates all of the research into one coherent and detailed piece of writing.\n\nThe post will follow the outline and further enrich it, with an interesting story that is based on the raw research, a hypothetical example (if applicable), a couple of different angles to look at the particular aspect of the topic, and a conclusion that also acts as a segue to the next part of the blog. \n\nThe writing style is elegant, with a bit of levity. It comes across as musings of an intellectual who is trying to examine the topic from various angles."
+ }
+ ],
+ "role": "user"
+ }
+ }
+ },
+ {
+ "id": "a2-c317510b",
+ "type": "file://bgl/a2.bgl.json#daf082ca-c1aa-4aff-b2c8-abeb984ab66c",
+ "metadata": {
+ "visual": {
+ "x": 557.9999999999993,
+ "y": -359.00000000000273,
+ "collapsed": "expanded"
+ },
+ "title": "Banner Prompt Writer",
+ "logLevel": "debug"
+ },
+ "configuration": {
+ "description": {
+ "parts": [
+ {
+ "text": "You are an image prompt writer. Given an outline of a blog post, you come up with a wildly abstract piece of art that reflects the ideas in the outline. This art will serve as the heading of the blog post."
+ } + ], + "role": "user" + } + } + }, + { + "id": "a2-f41834f9", + "type": "file://bgl/a2.bgl.json#module:image-generator", + "metadata": { + "visual": { + "x": 898, + "y": -205, + "collapsed": "expanded" + }, + "title": "Banner Image Generator", + "logLevel": "debug" + }, + "configuration": {} + }, + { + "id": "content-086a7fe8", + "type": "content", + "metadata": { + "visual": { + "x": 1239, + "y": -160.0000000000001, + "collapsed": "expanded" + }, + "title": "Content", + "logLevel": "debug" + }, + "configuration": { + "role": "user", + "template": { + "role": "user", + "parts": [ + { + "text": "{{image}}\n{{post}}" + } + ] + } + } + } + ], + "edges": [ + { + "from": "content-10fef410", + "to": "a2-cb7ec1eb", + "out": "context", + "in": "context" + }, + { + "from": "a2-cb7ec1eb", + "to": "a2-7e19872d", + "out": "context", + "in": "context" + }, + { + "from": "a2-7e19872d", + "to": "a2-25320551", + "out": "context", + "in": "context" + }, + { + "from": "a2-7e19872d", + "to": "a2-c317510b", + "out": "context", + "in": "context" + }, + { + "from": "a2-c317510b", + "to": "a2-f41834f9", + "out": "context", + "in": "context" + }, + { + "from": "a2-f41834f9", + "to": "content-086a7fe8", + "out": "context", + "in": "p-image" + }, + { + "from": "a2-25320551", + "to": "content-086a7fe8", + "out": "context", + "in": "p-post" + } + ], + "metadata": { + "tags": [], + "visual": {} + } +} \ No newline at end of file diff --git a/packages/a2/package.json b/packages/a2/package.json index 70524420da..c065cf52e4 100644 --- a/packages/a2/package.json +++ b/packages/a2/package.json @@ -1,14 +1,14 @@ { "name": "@breadboard-ai/a2", - "publishConfig": { - "registry": "https://wombat-dressing-room.appspot.com" - }, "version": "0.1.0", "description": "Breadboard Components for building AI systems", "exports": { "gemini": "./bgl/a2-gemini.bgl.json" }, "type": "module", + "scripts": { + "push": "tsx scripts/push.ts" + }, "repository": { "directory": "packages/a2", "type": "git", @@ -22,9 +22,12 @@ }, "homepage": "https://github.com/breadboard-ai/breadboard#readme", "devDependencies": { + "@google-cloud/firestore": "^7.11.0", + "@types/node": "^22.0.0", "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", - "@types/node": "^22.0.0", + "dotenv": "^16.4.7", + "tsx": "^4.19.2", "typescript": "^5.6.3" } } diff --git a/packages/a2/scripts/push.ts b/packages/a2/scripts/push.ts new file mode 100644 index 0000000000..1d11008a68 --- /dev/null +++ b/packages/a2/scripts/push.ts @@ -0,0 +1,56 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Firestore } from "@google-cloud/firestore"; +import dotenv from "dotenv"; +import { readdir, readFile } from "fs/promises"; +import { dirname, join } from "path"; +import { fileURLToPath } from "url"; + +dotenv.config(); + +const MODULE_DIR = dirname(fileURLToPath(import.meta.url)); +const ROOT_DIR = join(MODULE_DIR, ".."); +const BGL_DIR = join(ROOT_DIR, "bgl"); +const USER_ID = "shared"; + +const BGL_SUFFIX = ".bgl.json"; + +function whatToPush(entry: string) { + if (!entry.endsWith(BGL_SUFFIX)) return false; + entry = entry.slice(0, -BGL_SUFFIX.length); + if (entry.endsWith("workbench")) return false; + if (entry.endsWith("wip")) return false; + if (entry.endsWith("old")) return false; + if (entry.endsWith("test")) return false; + return true; +} + +async function main() { + const bgls = (await readdir(BGL_DIR)).filter(whatToPush); + + const databaseId = process.env["FIRESTORE_DB_NAME"] || 
"board-server"; + const database = new Firestore({ databaseId }); + + for (const bgl of bgls) { + const graph = await readFile(join(BGL_DIR, bgl), "utf-8"); + const descriptor = JSON.parse(graph); + const { + title, + metadata: { tags }, + description, + } = descriptor; + console.log(`Pushing ${bgl}: ${title}, ${tags}, ${description}`); + await database.doc(`/workspaces/${USER_ID}/boards/${bgl}`).set({ + graph, + tags, + title, + description, + }); + } +} + +main(); diff --git a/packages/board-server/package.json b/packages/board-server/package.json index d8c5a4ca26..ddfc13359c 100644 --- a/packages/board-server/package.json +++ b/packages/board-server/package.json @@ -209,7 +209,7 @@ "@breadboard-ai/google-drive-kit": "0.4.1", "@breadboard-ai/jsandbox": "0.3.0", "@breadboard-ai/types": "0.4.0", - "@google-cloud/firestore": "^7.10.0", + "@google-cloud/firestore": "^7.11.0", "@google-cloud/secret-manager": "^5.6.0", "@google-cloud/storage": "^7.14.0", "@google-labs/breadboard": "^0.31.0", diff --git a/packages/breadboard/src/handler.ts b/packages/breadboard/src/handler.ts index 5ff967fc26..48ee69ef74 100644 --- a/packages/breadboard/src/handler.ts +++ b/packages/breadboard/src/handler.ts @@ -103,9 +103,10 @@ export async function getGraphHandlerFromMutableGraph( const result = store.addByURL(type, [], { outerGraph: mutable.graph, }); + const latest = await store.getLatest(result.mutable); return new GraphBasedNodeHandler( { - graph: result.mutable.graph, + graph: latest.graph, subGraphId: result.graphId, moduleId: result.moduleId, }, diff --git a/packages/breadboard/src/inspector/graph/describer-manager.ts b/packages/breadboard/src/inspector/graph/describer-manager.ts index 185f98038a..147d1b9e6f 100644 --- a/packages/breadboard/src/inspector/graph/describer-manager.ts +++ b/packages/breadboard/src/inspector/graph/describer-manager.ts @@ -348,7 +348,7 @@ class GraphDescriberManager { if (this.handle.main() === moduleId) { result = await invokeMainDescriber( sandbox, - this.handle.graph(), + this.mutable.graph, inputs, inputSchema, outputSchema @@ -357,7 +357,7 @@ class GraphDescriberManager { result = await invokeDescriber( moduleId, sandbox, - this.handle.graph(), + this.mutable.graph, inputs, inputSchema, outputSchema diff --git a/packages/breadboard/src/inspector/graph/ports.ts b/packages/breadboard/src/inspector/graph/ports.ts index 27f68b0fcf..837b8c4d70 100644 --- a/packages/breadboard/src/inspector/graph/ports.ts +++ b/packages/breadboard/src/inspector/graph/ports.ts @@ -91,6 +91,7 @@ export const collectPorts = ( } return type === EdgeType.In ? 
edge.in : edge.out;
});
+ schema ??= {};
const fixed = schema.additionalProperties === false;
const schemaPortNames = Object.keys(schema.properties || {});
if (addErrorPort) {
@@ -281,7 +282,7 @@
const incoming = node.incoming();
const outgoing = node.outgoing();
const inputs: InspectablePortList = {
- fixed: described.inputSchema.additionalProperties === false,
+ fixed: described.inputSchema?.additionalProperties === false,
ports: collectPorts(
EdgeType.In,
incoming,
@@ -298,7 +299,7 @@
const addErrorPort =
node.descriptor.type !== "input" && node.descriptor.type !== "output";
const outputs: InspectablePortList = {
- fixed: described.outputSchema.additionalProperties === false,
+ fixed: described.outputSchema?.additionalProperties === false,
ports: collectPorts(
EdgeType.Out,
outgoing,
diff --git a/packages/breadboard/src/utils/schema-differ.ts b/packages/breadboard/src/utils/schema-differ.ts
index d0255b0294..95eaa12542 100644
--- a/packages/breadboard/src/utils/schema-differ.ts
+++ b/packages/breadboard/src/utils/schema-differ.ts
@@ -36,8 +36,7 @@ class SchemaDiffer implements SchemaDiff {
public readonly existing: Schema,
public readonly incoming: Schema
) {
- this.additionalPropsChanged =
- existing.additionalProperties !== incoming.additionalProperties;
+ this.additionalPropsChanged = this.computeAdditionalPropsChanged();
}

computeDiff(): void {
@@ -63,9 +62,15 @@ class SchemaDiffer implements SchemaDiff {
};
}

+ computeAdditionalPropsChanged() {
+ const incoming = !!this.incoming?.additionalProperties;
+ const existing = !!this.existing?.additionalProperties;
+ return incoming !== existing;
+ }
+
computeRequiredChanges() {
- const existing = this.existing.required || [];
- const incoming = this.incoming.required || [];
+ const existing = this.existing?.required || [];
+ const incoming = this.incoming?.required || [];
const existingSet = new Set(existing);

for (const name of incoming) {
@@ -79,8 +84,8 @@
}

computePropertyChanges() {
- const existing = this.existing.properties || {};
- const incoming = this.incoming.properties || {};
+ const existing = this.existing?.properties || {};
+ const incoming = this.incoming?.properties || {};
const all = new Set([...Object.keys(existing), ...Object.keys(incoming)]);

for (const name of all) {
diff --git a/packages/discovery-types/package.json b/packages/discovery-types/package.json
index 4aa8a879e4..7e410ecb11 100644
--- a/packages/discovery-types/package.json
+++ b/packages/discovery-types/package.json
@@ -105,7 +105,7 @@
"typescript": "^5.6.3"
},
"dependencies": {
- "dotenv": "^16.4.5",
+ "dotenv": "^16.4.7",
"prettier": "^3.4.1"
}
}
diff --git a/packages/example-board-server/package.json b/packages/example-board-server/package.json
index d04f88a9d7..a37b93be77 100644
--- a/packages/example-board-server/package.json
+++ b/packages/example-board-server/package.json
@@ -81,7 +81,7 @@
"@typescript-eslint/eslint-plugin": "^7.18.0",
"@typescript-eslint/parser": "^7.18.0",
"ava": "^5.2.0",
- "dotenv": "^16.4.5",
+ "dotenv": "^16.4.7",
"tsx": "^4.19.2",
"typescript": "^5.6.3",
"vite": "^6.0.2",
diff --git a/packages/filesystem-board-server/package.json b/packages/filesystem-board-server/package.json
index 71633fd77a..984e057acc 100644
--- a/packages/filesystem-board-server/package.json
+++ b/packages/filesystem-board-server/package.json
@@ -79,7 +79,7 @@
"@typescript-eslint/eslint-plugin": "^7.18.0",
"@typescript-eslint/parser": "^7.18.0",
"ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", diff --git a/packages/idb-board-server/package.json b/packages/idb-board-server/package.json index 8301236af3..771b14f075 100644 --- a/packages/idb-board-server/package.json +++ b/packages/idb-board-server/package.json @@ -79,7 +79,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", diff --git a/packages/manifest/bbm.schema.json b/packages/manifest/bbm.schema.json index 6b64a5557c..3394890980 100644 --- a/packages/manifest/bbm.schema.json +++ b/packages/manifest/bbm.schema.json @@ -170,6 +170,10 @@ "$ref": "#/definitions/InputValues", "description": "Arguments that are passed to the graph, useful to bind values to graphs." }, + "describer": { + "description": "The URL of the graph that will act as the describer for this graph. Can be a relative URL and refer to a sub-graph within this graph.\n\nThe describers in the format of \"module:name\" will be interpreted as \"use the `describe` export of the module named `name` to describe this graph\".", + "type": "string" + }, "description": { "description": "The description of the graph.", "type": "string" diff --git a/packages/mod-playground/package.json b/packages/mod-playground/package.json index 913c4ab7e9..a8c3c02701 100644 --- a/packages/mod-playground/package.json +++ b/packages/mod-playground/package.json @@ -131,7 +131,7 @@ "devDependencies": { "@esbuild-plugins/node-globals-polyfill": "^0.2.3", "@google-labs/tsconfig": "^0.0.1", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "typescript": "^5.6.3", "vite": "^6.0.2", "vite-plugin-full-reload": "^1.2.0", diff --git a/packages/remote-board-server/package.json b/packages/remote-board-server/package.json index 5beec8c848..bffd634646 100644 --- a/packages/remote-board-server/package.json +++ b/packages/remote-board-server/package.json @@ -79,7 +79,7 @@ "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", "ava": "^5.2.0", - "dotenv": "^16.4.5", + "dotenv": "^16.4.7", "tsx": "^4.19.2", "typescript": "^5.6.3", "vite": "^6.0.2", diff --git a/packages/schema/breadboard.schema.json b/packages/schema/breadboard.schema.json index fcb4c8097b..04af885f59 100644 --- a/packages/schema/breadboard.schema.json +++ b/packages/schema/breadboard.schema.json @@ -86,6 +86,10 @@ "type": "string", "description": "Version of the graph. [semver](https://semver.org/) format is encouraged." }, + "describer": { + "type": "string", + "description": "The URL of the graph that will act as the describer for this graph. Can be a relative URL and refer to a sub-graph within this graph.\n\nThe describers in the format of \"module:name\" will be interpreted as \"use the `describe` export of the module named `name` to describe this graph\"." + }, "metadata": { "$ref": "#/definitions/GraphMetadata", "description": "Metadata associated with the graph." 
diff --git a/packages/shared-ui/package.json b/packages/shared-ui/package.json
index e3e77bc23d..2ee5c180f5 100644
--- a/packages/shared-ui/package.json
+++ b/packages/shared-ui/package.json
@@ -103,7 +103,7 @@
"@typescript-eslint/eslint-plugin": "^7.18.0",
"@typescript-eslint/parser": "^7.18.0",
"ava": "^5.2.0",
- "dotenv": "^16.4.5",
+ "dotenv": "^16.4.7",
"tsx": "^4.19.2",
"typescript": "^5.6.3",
"vite": "^6.0.2",
diff --git a/packages/types/src/graph-descriptor.ts b/packages/types/src/graph-descriptor.ts
index 0bc90095d1..257eb41fbf 100644
--- a/packages/types/src/graph-descriptor.ts
+++ b/packages/types/src/graph-descriptor.ts
@@ -231,6 +231,16 @@ export type GraphInlineMetadata = {
* [semver](https://semver.org/) format is encouraged.
*/
version?: string;
+ /**
+ * The URL of the graph that will act as the describer for
+ * this graph. Can be a relative URL and refer to a sub-graph
+ * within this graph.
+ *
+ * Describers in the format "module:name" will be interpreted as
+ * "use the `describe` export of the module named `name` to describe this
+ * graph".
+ */
+ describer?: string;
};

/**
diff --git a/packages/visual-editor/package.json b/packages/visual-editor/package.json
index 913cebdc33..f66ba92b8c 100644
--- a/packages/visual-editor/package.json
+++ b/packages/visual-editor/package.json
@@ -315,7 +315,7 @@
"@typescript-eslint/eslint-plugin": "^7.18.0",
"@typescript-eslint/parser": "^7.18.0",
"ava": "^5.2.0",
- "dotenv": "^16.4.5",
+ "dotenv": "^16.4.7",
"tsx": "^4.19.2",
"typescript": "^5.6.3",
"vite": "^6.0.2",
diff --git a/packages/visual-editor/src/index.ts b/packages/visual-editor/src/index.ts
index fb2c448bd8..b30799b097 100644
--- a/packages/visual-editor/src/index.ts
+++ b/packages/visual-editor/src/index.ts
@@ -1634,8 +1634,7 @@ export class Main extends LitElement {
evt.isTool,
evt.isComponent
);
- }
- if (evt.moduleId) {
+ } else if (evt.moduleId) {
await this.#runtime.edit.updateModuleInfo(
tab,
evt.moduleId,