From 89a87ef6c9a57d5a7039167007e74322bd6f3230 Mon Sep 17 00:00:00 2001 From: Dimitri Glazkov Date: Tue, 14 Jan 2025 16:05:22 -0800 Subject: [PATCH] Word-smithing. --- packages/a2/bgl/a2.bgl.json | 10 +++++----- packages/a2/bgl/agent-workbench.bgl.json | 18 +++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/packages/a2/bgl/a2.bgl.json b/packages/a2/bgl/a2.bgl.json index 5847cb9cb8..8d13e5583a 100644 --- a/packages/a2/bgl/a2.bgl.json +++ b/packages/a2/bgl/a2.bgl.json @@ -63,11 +63,11 @@ } }, "entry": { - "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\nimport {} from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, chat, defaultModel, description, type = \"work\", }) {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Job Description\",\n description: \"A detailed list of skills and capabilities of this agent.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description: \"When checked, the agent will talk with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description: \"When checked, the agent will critique itself to ensure the best quality output, in 
exchange for taking a little bit more time.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n", + "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\nimport {} from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, chat, defaultModel, description, type = \"work\", }) {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Instruction\",\n description: \"Give the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description: \"When checked, the model will chat with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description: \"When checked, the model will critique its output to improve quality in exchange for taking a little bit more time.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] 
},\n title: \"Context out\",\n },\n },\n },\n };\n}\n", "metadata": { "title": "entry", "source": { - "code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\n\nimport { type AgentContext, type AgentInputs } from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\n\nexport { invoke as default, describe };\n\ntype Outputs = {\n context: AgentContext;\n};\n\nasync function invoke({\n context,\n chat,\n defaultModel,\n description,\n type = \"work\",\n}: AgentInputs): Promise {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Job Description\",\n description:\n \"A detailed list of skills and capabilities of this agent.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description:\n \"When checked, the agent will talk with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description:\n \"When checked, the agent will critique itself to ensure the best quality output, in exchange for taking a little bit more time.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", + "code": "/**\n 
 * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\n\nimport { type AgentContext, type AgentInputs } from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\n\nexport { invoke as default, describe };\n\ntype Outputs = {\n context: AgentContext;\n};\n\nasync function invoke({\n context,\n chat,\n defaultModel,\n description,\n type = \"work\",\n}: AgentInputs): Promise {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Instruction\",\n description:\n \"Give the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description:\n \"When checked, the model will chat with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description:\n \"When checked, the model will critique its output to improve quality in exchange for taking a little bit more time.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", "language": "typescript" }, "description": "Manages the 
entry point: describer, passing the inputs, etc.", @@ -159,14 +159,14 @@ } }, "researcher": { - "code": "/**\n * @fileoverview Add a description for your module here.\n */\nexport { invoke as default, describe };\nasync function invoke({ context }) {\n return { context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description: \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n", + "code": "/**\n * @fileoverview Scours the Internet according to your plan.\n */\nexport { invoke as default, describe };\nasync function invoke({ context }) {\n return { context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description: \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n", "metadata": { "title": "Researcher", "source": { - "code": "/**\n * @fileoverview Add a description for your module here.\n */\n\nexport { invoke as default, describe };\n\nasync function invoke({ context }: { context: LLMContent[] }) {\n return { context };\n}\n\nasync function describe() {\n return {\n 
inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description:\n \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", + "code": "/**\n * @fileoverview Scours the Internet according to your plan.\n */\n\nexport { invoke as default, describe };\n\nasync function invoke({ context }: { context: LLMContent[] }) {\n return { context };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description:\n \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n", "language": "typescript" }, - "description": "Add a description for your module here.", + "description": "Scours the Internet according to your plan.", "runnable": true } }, diff --git a/packages/a2/bgl/agent-workbench.bgl.json b/packages/a2/bgl/agent-workbench.bgl.json index f279fdb374..1e94f979d5 100644 --- a/packages/a2/bgl/agent-workbench.bgl.json +++ b/packages/a2/bgl/agent-workbench.bgl.json @@ -8,8 +8,8 @@ "type": "content", "metadata": { "visual": { - "x": -642.9999999999999, - "y": -185.00000000000045, + 
"x": -642.9999999999998, + "y": -185.0000000000009, "collapsed": "expanded" }, "title": "Content", @@ -32,8 +32,8 @@ "type": "file://bgl/a2.bgl.json#daf082ca-c1aa-4aff-b2c8-abeb984ab66c", "metadata": { "visual": { - "x": 135, - "y": -180.00000000000003, + "x": 135.00000000000023, + "y": -180, "collapsed": "expanded" }, "title": "Outline Writer", @@ -56,7 +56,7 @@ "metadata": { "visual": { "x": -255, - "y": -166.00000000000048, + "y": -166.0000000000009, "collapsed": "expanded" }, "title": "Researcher", @@ -102,7 +102,7 @@ "metadata": { "visual": { "x": 557.9999999999995, - "y": -359.0000000000023, + "y": -359.00000000000273, "collapsed": "expanded" }, "title": "Banner Prompt Writer", @@ -124,8 +124,8 @@ "type": "file://bgl/a2.bgl.json#module:image-generator", "metadata": { "visual": { - "x": 933.9999999999997, - "y": -218.00000000000185, + "x": 933.9999999999998, + "y": -218.00000000000182, "collapsed": "expanded" }, "title": "Banner Image Generator", @@ -138,7 +138,7 @@ "type": "content", "metadata": { "visual": { - "x": 1364.0000000000005, + "x": 1364.0000000000007, "y": -236.00000000000182, "collapsed": "expanded" },