# orchestrated-discussions/smarttools/discussion-performance/config.yaml
---
# Tool identity and registry metadata.
name: discussion-performance
description: Performance optimization specialist participant for discussions
category: Discussion

# Participant persona shown to the orchestrator and other discussion members.
meta:
  display_name: AI-Performance
  alias: performance
  # "voting" participants may cast READY/CHANGES/REJECT votes (see prompt below).
  type: voting
  expertise:
    - Performance profiling
    - Algorithm optimization
    - Caching strategies
    - Database optimization
    - Memory management
    - Load testing
    - Scalability patterns
  # Standing questions this persona raises during review.
  concerns:
    - "What's the time complexity?"
    - "Where are the bottlenecks?"
    - "How does this scale under load?"
    - "Are we using resources efficiently?"
  voice: en-US-Neural2-J
  provider: opencode-reasoner
  # Display color as an RGB triple (0-255 each).
  color:
    - 255
    - 220
    - 100
# Command-line arguments accepted by this tool. Each entry maps a CLI flag to
# a variable name available to the `steps` code/prompt templates below.
arguments:
  - flag: --callout
    variable: callout
    default: ''
    description: Specific question or @mention context
  - flag: --templates-dir
    variable: templates_dir
    default: templates
    description: Path to templates directory
  - flag: --diagrams-dir
    variable: diagrams_dir
    default: diagrams
    description: Path to save diagrams
  - flag: --log-file
    variable: log_file
    default: ''
    description: Path to log file for progress updates
# Execution pipeline. Each step is either embedded Python (`type: code`) or an
# LLM call (`type: prompt`); `output_var` names the variable(s) a step exports
# to later steps. Code bodies use `|` literal block scalars so the Python
# source keeps its newlines and indentation verbatim.
steps:
  # Step 1: read optional <!-- Phase: --> / <!-- Template: --> markers from the
  # discussion input, then load phase goal/instructions from the matching
  # template file when it exists (falling back to generic defaults).
  - type: code
    code: |
      import re
      import os

      phase_match = re.search(r'<!--\s*Phase:\s*(\w+)\s*-->', input, re.IGNORECASE)
      template_match = re.search(r'<!--\s*Template:\s*(\w+)\s*-->', input, re.IGNORECASE)

      current_phase = phase_match.group(1) if phase_match else "initial_feedback"
      template_name = template_match.group(1) if template_match else "feature"

      template_path = os.path.join(templates_dir, template_name + ".yaml")
      phase_goal = "Provide performance feedback"
      phase_instructions = "Review the proposal for performance and scalability concerns."

      if os.path.exists(template_path):
          import yaml
          with open(template_path, 'r') as f:
              template = yaml.safe_load(f)
          phases = template.get("phases", {})
          phase_info = phases.get(current_phase, {})
          phase_goal = phase_info.get("goal", phase_goal)
          phase_instructions = phase_info.get("instructions", phase_instructions)

      phase_context = "Current Phase: " + current_phase + "\n"
      phase_context += "Phase Goal: " + phase_goal + "\n"
      phase_context += "Phase Instructions:\n" + phase_instructions
    output_var: phase_context, current_phase

  # Step 2: emit progress lines to stderr (and to --log-file when set) before
  # the potentially slow provider call.
  - type: code
    code: |
      import sys
      import datetime as dt
      timestamp = dt.datetime.now().strftime("%H:%M:%S")
      for msg in [f"Phase: {current_phase}", "Calling AI provider..."]:
          line = f"[{timestamp}] [performance] {msg}"
          print(line, file=sys.stderr)
          sys.stderr.flush()
          if log_file:
              with open(log_file, 'a') as f:
                  f.write(line + "\n")
                  f.flush()
    output_var: _progress1

  # Step 3: the provider call. `{{` / `}}` are literal braces escaped for the
  # template engine; {phase_context}, {input}, {callout} are substituted.
  - type: prompt
    prompt: |
      You are AI-Performance (also known as Perry), a performance optimization specialist
      who ensures systems are fast, efficient, and scalable.

      ## Your Role
      - Identify potential performance bottlenecks
      - Evaluate algorithmic efficiency and complexity
      - Recommend caching and optimization strategies
      - Consider resource utilization and cost
      - Plan for scale and load testing

      ## Your Perspective
      - Premature optimization is the root of all evil, but known bottlenecks must be addressed
      - Measure first, optimize second
      - O(n) vs O(n^2) matters more at scale
      - Memory and CPU have different optimization strategies
      - The fastest code is code that doesn't run

      ## Performance Checklist
      - Time complexity of algorithms
      - Space complexity and memory usage
      - Database query efficiency (indexes, joins)
      - Network round trips
      - Caching opportunities
      - Batch vs real-time processing
      - Concurrency and parallelization
      - Resource pooling and reuse

      ## Phase Context
      {phase_context}

      ## Current Discussion
      {input}

      ## Your Task
      {callout}

      Follow the phase instructions. Analyze from a performance and scalability perspective.
      Identify bottlenecks, suggest optimizations, and consider scaling implications.

      ## Response Format
      Respond with valid JSON only. Use \n for newlines in strings:
      {{
        "comment": "Your performance analysis...\n\nOptimization opportunities:\n1. ...\n2. ...",
        "vote": "READY" or "CHANGES" or "REJECT" or null,
        "diagram": null
      }}

      Vote meanings:
      - READY: Performance is acceptable
      - CHANGES: Performance improvements needed
      - REJECT: Significant performance issues
      - null: Comment only, no vote change

      If you have nothing meaningful to add, respond: {{"sentinel": "NO_RESPONSE"}}
    provider: opencode-reasoner
    output_var: response

  # Step 4: progress line confirming the provider responded.
  - type: code
    code: |
      import sys
      import datetime as dt
      timestamp = dt.datetime.now().strftime("%H:%M:%S")
      line = f"[{timestamp}] [performance] AI response received"
      print(line, file=sys.stderr)
      sys.stderr.flush()
      if log_file:
          with open(log_file, 'a') as f:
              f.write(line + "\n")
              f.flush()
    output_var: _progress2

  # Step 5: strip a Markdown ```json fence if the model wrapped its JSON in one.
  - type: code
    code: |
      import re
      json_text = response.strip()
      if json_text.startswith('```'):
          code_block = re.search(r'```(?:json)?\s*(\{.*\})\s*```', json_text, re.DOTALL)
          if code_block:
              json_text = code_block.group(1).strip()
    output_var: json_text

  # Step 6: parse the JSON with two fallbacks: first re-escape raw newlines
  # (a common model mistake inside string values), then regex-extract the
  # "comment" and "vote" fields from the malformed text as a last resort.
  - type: code
    code: |
      import json
      try:
          parsed = json.loads(json_text)
      except json.JSONDecodeError:
          fixed = json_text.replace('\n', '\\n')
          try:
              parsed = json.loads(fixed)
          except json.JSONDecodeError:
              import re
              comment_match = re.search(r'"comment"\s*:\s*"(.*?)"(?=\s*[,}])', json_text, re.DOTALL)
              vote_match = re.search(r'"vote"\s*:\s*("?\w+"?|null)', json_text)
              parsed = {
                  "comment": comment_match.group(1).replace('\n', ' ') if comment_match else "Parse error",
                  "vote": vote_match.group(1).strip('"') if vote_match else None
              }
              if parsed["vote"] == "null":
                  parsed["vote"] = None
      comment = parsed.get("comment", "")
      vote = parsed.get("vote")
    output_var: comment, vote

  # Step 7: re-serialize the normalized result. A literal block scalar is
  # required here: a flow scalar would fold these lines onto one line and
  # produce invalid Python.
  - type: code
    code: |
      import json
      result = {"comment": comment, "vote": vote}
      final_response = json.dumps(result)
    output_var: final_response

# The tool's final output (quoted: a bare leading `{` would start a flow mapping).
output: '{final_response}'