Commit

Word-smithing.
dglazkov committed Jan 15, 2025
1 parent c8f5327 commit 89a87ef
Showing 2 changed files with 14 additions and 14 deletions.
10 changes: 5 additions & 5 deletions packages/a2/bgl/a2.bgl.json
@@ -63,11 +63,11 @@
}
},
"entry": {
"code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\nimport {} from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, chat, defaultModel, description, type = \"work\", }) {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Job Description\",\n description: \"A detailed list of skills and capabilities of this agent.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description: \"When checked, the agent will talk with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description: \"When checked, the agent will critique itself to ensure the best quality output, in exchange for taking a little bit more time.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n",
"code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\nimport {} from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\nexport { invoke as default, describe };\nasync function invoke({ context, chat, defaultModel, description, type = \"work\", }) {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Instruction\",\n description: \"Give the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description: \"When checked, the model will chat with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description: \"When checked, the model will critique its output to to improve quality in exchange for taking a little bit more time.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n",
"metadata": {
"title": "entry",
"source": {
"code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\n\nimport { type AgentContext, type AgentInputs } from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\n\nexport { invoke as default, describe };\n\ntype Outputs = {\n context: AgentContext;\n};\n\nasync function invoke({\n context,\n chat,\n defaultModel,\n description,\n type = \"work\",\n}: AgentInputs): Promise<Outputs> {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Job Description\",\n description:\n \"A detailed list of skills and capabilities of this agent.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description:\n \"When checked, the agent will talk with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description:\n \"When checked, the agent will critique itself to ensure the best quality output, in exchange for taking a little bit more time.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n",
"code": "/**\n * @fileoverview Manages the entry point: describer, passing the inputs, etc.\n */\n\nimport { type AgentContext, type AgentInputs } from \"./common\";\nimport { toLLMContent, defaultLLMContent } from \"./utils\";\n\nexport { invoke as default, describe };\n\ntype Outputs = {\n context: AgentContext;\n};\n\nasync function invoke({\n context,\n chat,\n defaultModel,\n description,\n type = \"work\",\n}: AgentInputs): Promise<Outputs> {\n // Make sure it's a boolean.\n chat = !!chat;\n context ??= [];\n return {\n context: {\n id: Math.random().toString(36).substring(2, 5),\n chat,\n context,\n userInputs: [],\n defaultModel,\n model: \"\",\n description,\n tools: [],\n type,\n work: [],\n },\n };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n description: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Instruction\",\n description:\n \"Give the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.\",\n default: defaultLLMContent(),\n },\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n \"p-chat\": {\n type: \"boolean\",\n title: \"Chat with User\",\n behavior: [\"config\"],\n description:\n \"When checked, the model will chat with the user, asking to review work, requesting additional information, etc.\",\n },\n \"p-critique\": {\n type: \"boolean\",\n title: \"Self-critique\",\n behavior: [\"config\"],\n description:\n \"When checked, the model will critique its output to to improve quality in exchange for taking a little bit more time.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n",
"language": "typescript"
},
"description": "Manages the entry point: describer, passing the inputs, etc.",
@@ -159,14 +159,14 @@
}
},
"researcher": {
"code": "/**\n * @fileoverview Add a description for your module here.\n */\nexport { invoke as default, describe };\nasync function invoke({ context }) {\n return { context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description: \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n",
"code": "/**\n * @fileoverview Scours the Internet according to your plan.\n */\nexport { invoke as default, describe };\nasync function invoke({ context }) {\n return { context };\n}\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description: \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n },\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n },\n };\n}\n",
"metadata": {
"title": "Researcher",
"source": {
"code": "/**\n * @fileoverview Add a description for your module here.\n */\n\nexport { invoke as default, describe };\n\nasync function invoke({ context }: { context: LLMContent[] }) {\n return { context };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description:\n \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n",
"code": "/**\n * @fileoverview Scours the Internet according to your plan.\n */\n\nexport { invoke as default, describe };\n\nasync function invoke({ context }: { context: LLMContent[] }) {\n return { context };\n}\n\nasync function describe() {\n return {\n inputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context in\",\n },\n plan: {\n type: \"object\",\n behavior: [\"llm-content\", \"config\"],\n title: \"Research Plan\",\n description:\n \"Provide an outline of what to research, what areas to cover, etc.\",\n },\n },\n } satisfies Schema,\n outputSchema: {\n type: \"object\",\n properties: {\n context: {\n type: \"array\",\n items: { type: \"object\", behavior: [\"llm-content\"] },\n title: \"Context out\",\n },\n },\n } satisfies Schema,\n };\n}\n",
"language": "typescript"
},
"description": "Add a description for your module here.",
"description": "Scours the Internet according to your plan.",
"runnable": true
}
},
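The word-smithing in the entry module above retitles the "description" port from "Job Description" to "Instruction" and rewords the "Chat with User" and "Self-critique" toggles to refer to "the model" rather than "the agent". Unescaped from the new source string, the updated input schema reads roughly as follows. This is a readability sketch only: in the module itself the object lives inside describe() and is followed by "satisfies Schema" (an ambient type in this package), and defaultLLMContent is the helper the module already imports from "./utils".

import { defaultLLMContent } from "./utils";

// Updated entry-module input schema, unescaped from the source string above
// and pulled into a standalone constant purely for illustration.
const inputSchema = {
  type: "object",
  properties: {
    description: {
      type: "object",
      behavior: ["llm-content", "config"],
      title: "Instruction", // previously "Job Description"
      description:
        "Give the model additional context on what to do, like specific rules/guidelines to adhere to or specify behavior separate from the provided context.",
      default: defaultLLMContent(),
    },
    context: {
      type: "array",
      items: { type: "object", behavior: ["llm-content"] },
      title: "Context in",
    },
    "p-chat": {
      type: "boolean",
      title: "Chat with User",
      behavior: ["config"],
      description:
        "When checked, the model will chat with the user, asking to review work, requesting additional information, etc.",
    },
    "p-critique": {
      type: "boolean",
      title: "Self-critique",
      behavior: ["config"],
      description:
        "When checked, the model will critique its output to improve quality in exchange for taking a little bit more time.",
    },
  },
};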
18 changes: 9 additions & 9 deletions packages/a2/bgl/agent-workbench.bgl.json
@@ -8,8 +8,8 @@
"type": "content",
"metadata": {
"visual": {
"x": -642.9999999999999,
"y": -185.00000000000045,
"x": -642.9999999999998,
"y": -185.0000000000009,
"collapsed": "expanded"
},
"title": "Content",
@@ -32,8 +32,8 @@
"type": "file://bgl/a2.bgl.json#daf082ca-c1aa-4aff-b2c8-abeb984ab66c",
"metadata": {
"visual": {
"x": 135,
"y": -180.00000000000003,
"x": 135.00000000000023,
"y": -180,
"collapsed": "expanded"
},
"title": "Outline Writer",
@@ -56,7 +56,7 @@
"metadata": {
"visual": {
"x": -255,
"y": -166.00000000000048,
"y": -166.0000000000009,
"collapsed": "expanded"
},
"title": "Researcher",
@@ -102,7 +102,7 @@
"metadata": {
"visual": {
"x": 557.9999999999995,
"y": -359.0000000000023,
"y": -359.00000000000273,
"collapsed": "expanded"
},
"title": "Banner Prompt Writer",
@@ -124,8 +124,8 @@
"type": "file://bgl/a2.bgl.json#module:image-generator",
"metadata": {
"visual": {
"x": 933.9999999999997,
"y": -218.00000000000185,
"x": 933.9999999999998,
"y": -218.00000000000182,
"collapsed": "expanded"
},
"title": "Banner Image Generator",
@@ -138,7 +138,7 @@
"type": "content",
"metadata": {
"visual": {
"x": 1364.0000000000005,
"x": 1364.0000000000007,
"y": -236.00000000000182,
"collapsed": "expanded"
},
