From 37f8eac93225251d1e4b00b7d78e7d9e9ac7cf6a Mon Sep 17 00:00:00 2001 From: Greg Date: Sun, 22 Mar 2026 11:23:00 +0100 Subject: [PATCH] Initial commit: CFTC COT Explorer FastAPI application that ingests CFTC Commitments of Traders data into SQLite and exposes it via a REST API with analytics endpoints (screener, percentile rank, concentration). Includes CLI for historical and weekly data ingestion, Docker setup, and a frontend. Co-Authored-By: Claude Sonnet 4.6 --- .claude/commands/bmad-agent-bmad-master.md | 16 + .claude/commands/bmad-brainstorming.md | 7 + .../commands/bmad-editorial-review-prose.md | 10 + .../bmad-editorial-review-structure.md | 10 + .claude/commands/bmad-help.md | 10 + .claude/commands/bmad-index-docs.md | 10 + .claude/commands/bmad-party-mode.md | 7 + .../bmad-review-adversarial-general.md | 10 + .claude/commands/bmad-shard-doc.md | 10 + .gitignore | 55 ++ Dockerfile | 18 + README.md | 147 ++++ .../brainstorming-session-2026-02-22.md | 47 ++ _bmad/_config/agent-manifest.csv | 2 + .../agents/core-bmad-master.customize.yaml | 41 ++ _bmad/_config/bmad-help.csv | 9 + _bmad/_config/files-manifest.csv | 31 + _bmad/_config/ides/claude-code.yaml | 5 + _bmad/_config/manifest.yaml | 14 + _bmad/_config/task-manifest.csv | 7 + _bmad/_config/tool-manifest.csv | 1 + _bmad/_config/workflow-manifest.csv | 3 + _bmad/core/agents/bmad-master.md | 56 ++ _bmad/core/config.yaml | 9 + _bmad/core/module-help.csv | 9 + _bmad/core/tasks/editorial-review-prose.xml | 102 +++ .../core/tasks/editorial-review-structure.xml | 209 ++++++ _bmad/core/tasks/help.md | 85 +++ _bmad/core/tasks/index-docs.xml | 65 ++ .../core/tasks/review-adversarial-general.xml | 48 ++ _bmad/core/tasks/shard-doc.xml | 108 +++ _bmad/core/tasks/workflow.xml | 235 +++++++ .../advanced-elicitation/methods.csv | 51 ++ .../advanced-elicitation/workflow.xml | 117 ++++ .../workflows/brainstorming/brain-methods.csv | 62 ++ .../steps/step-01-session-setup.md | 197 ++++++ 
.../brainstorming/steps/step-01b-continue.md | 122 ++++ .../steps/step-02a-user-selected.md | 225 +++++++ .../steps/step-02b-ai-recommended.md | 237 +++++++ .../steps/step-02c-random-selection.md | 209 ++++++ .../steps/step-02d-progressive-flow.md | 264 ++++++++ .../steps/step-03-technique-execution.md | 399 +++++++++++ .../steps/step-04-idea-organization.md | 303 +++++++++ .../core/workflows/brainstorming/template.md | 15 + .../core/workflows/brainstorming/workflow.md | 58 ++ .../party-mode/steps/step-01-agent-loading.md | 138 ++++ .../steps/step-02-discussion-orchestration.md | 187 ++++++ .../party-mode/steps/step-03-graceful-exit.md | 168 +++++ _bmad/core/workflows/party-mode/workflow.md | 194 ++++++ app/__init__.py | 0 app/api/__init__.py | 0 app/api/main.py | 38 ++ app/api/models.py | 130 ++++ app/api/routes/__init__.py | 0 app/api/routes/analytics.py | 195 ++++++ app/api/routes/commodities.py | 64 ++ app/api/routes/positions.py | 299 +++++++++ app/api/routes/reports.py | 62 ++ app/db.py | 26 + app/ingestion/__init__.py | 0 app/ingestion/cli.py | 143 ++++ app/ingestion/importer.py | 419 ++++++++++++ app/ingestion/parser.py | 618 +++++++++++++++++ cftc_cot_analysis_2026-02-17.md | 115 ++++ cftc_downloader.py | 153 +++++ docker-compose.yml | 38 ++ frontend/app.js | 625 ++++++++++++++++++ frontend/index.html | 188 ++++++ frontend/style.css | 517 +++++++++++++++ package.json | 5 + requirements.txt | 5 + schema.sql | 183 +++++ scripts/cron_entrypoint.sh | 22 + scripts/crontab | 2 + 74 files changed, 8189 insertions(+) create mode 100644 .claude/commands/bmad-agent-bmad-master.md create mode 100644 .claude/commands/bmad-brainstorming.md create mode 100644 .claude/commands/bmad-editorial-review-prose.md create mode 100644 .claude/commands/bmad-editorial-review-structure.md create mode 100644 .claude/commands/bmad-help.md create mode 100644 .claude/commands/bmad-index-docs.md create mode 100644 .claude/commands/bmad-party-mode.md create mode 100644 
.claude/commands/bmad-review-adversarial-general.md create mode 100644 .claude/commands/bmad-shard-doc.md create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 README.md create mode 100644 _bmad-output/brainstorming/brainstorming-session-2026-02-22.md create mode 100644 _bmad/_config/agent-manifest.csv create mode 100644 _bmad/_config/agents/core-bmad-master.customize.yaml create mode 100644 _bmad/_config/bmad-help.csv create mode 100644 _bmad/_config/files-manifest.csv create mode 100644 _bmad/_config/ides/claude-code.yaml create mode 100644 _bmad/_config/manifest.yaml create mode 100644 _bmad/_config/task-manifest.csv create mode 100644 _bmad/_config/tool-manifest.csv create mode 100644 _bmad/_config/workflow-manifest.csv create mode 100644 _bmad/core/agents/bmad-master.md create mode 100644 _bmad/core/config.yaml create mode 100644 _bmad/core/module-help.csv create mode 100644 _bmad/core/tasks/editorial-review-prose.xml create mode 100644 _bmad/core/tasks/editorial-review-structure.xml create mode 100644 _bmad/core/tasks/help.md create mode 100644 _bmad/core/tasks/index-docs.xml create mode 100644 _bmad/core/tasks/review-adversarial-general.xml create mode 100644 _bmad/core/tasks/shard-doc.xml create mode 100644 _bmad/core/tasks/workflow.xml create mode 100644 _bmad/core/workflows/advanced-elicitation/methods.csv create mode 100644 _bmad/core/workflows/advanced-elicitation/workflow.xml create mode 100644 _bmad/core/workflows/brainstorming/brain-methods.csv create mode 100644 _bmad/core/workflows/brainstorming/steps/step-01-session-setup.md create mode 100644 _bmad/core/workflows/brainstorming/steps/step-01b-continue.md create mode 100644 _bmad/core/workflows/brainstorming/steps/step-02a-user-selected.md create mode 100644 _bmad/core/workflows/brainstorming/steps/step-02b-ai-recommended.md create mode 100644 _bmad/core/workflows/brainstorming/steps/step-02c-random-selection.md create mode 100644 
_bmad/core/workflows/brainstorming/steps/step-02d-progressive-flow.md create mode 100644 _bmad/core/workflows/brainstorming/steps/step-03-technique-execution.md create mode 100644 _bmad/core/workflows/brainstorming/steps/step-04-idea-organization.md create mode 100644 _bmad/core/workflows/brainstorming/template.md create mode 100644 _bmad/core/workflows/brainstorming/workflow.md create mode 100644 _bmad/core/workflows/party-mode/steps/step-01-agent-loading.md create mode 100644 _bmad/core/workflows/party-mode/steps/step-02-discussion-orchestration.md create mode 100644 _bmad/core/workflows/party-mode/steps/step-03-graceful-exit.md create mode 100644 _bmad/core/workflows/party-mode/workflow.md create mode 100644 app/__init__.py create mode 100644 app/api/__init__.py create mode 100644 app/api/main.py create mode 100644 app/api/models.py create mode 100644 app/api/routes/__init__.py create mode 100644 app/api/routes/analytics.py create mode 100644 app/api/routes/commodities.py create mode 100644 app/api/routes/positions.py create mode 100644 app/api/routes/reports.py create mode 100644 app/db.py create mode 100644 app/ingestion/__init__.py create mode 100644 app/ingestion/cli.py create mode 100644 app/ingestion/importer.py create mode 100644 app/ingestion/parser.py create mode 100644 cftc_cot_analysis_2026-02-17.md create mode 100644 cftc_downloader.py create mode 100644 docker-compose.yml create mode 100644 frontend/app.js create mode 100644 frontend/index.html create mode 100644 frontend/style.css create mode 100644 package.json create mode 100644 requirements.txt create mode 100644 schema.sql create mode 100755 scripts/cron_entrypoint.sh create mode 100644 scripts/crontab diff --git a/.claude/commands/bmad-agent-bmad-master.md b/.claude/commands/bmad-agent-bmad-master.md new file mode 100644 index 0000000..fcf0a08 --- /dev/null +++ b/.claude/commands/bmad-agent-bmad-master.md @@ -0,0 +1,16 @@ +--- +name: 'bmad-master' +description: 'bmad-master agent' 
+disable-model-invocation: true +--- + +You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command. + + +1. LOAD the FULL agent file from {project-root}/_bmad/core/agents/bmad-master.md +2. READ its entire contents - this contains the complete agent persona, menu, and instructions +3. FOLLOW every step in the section precisely +4. DISPLAY the welcome/greeting as instructed +5. PRESENT the numbered menu +6. WAIT for user input before proceeding + diff --git a/.claude/commands/bmad-brainstorming.md b/.claude/commands/bmad-brainstorming.md new file mode 100644 index 0000000..7256342 --- /dev/null +++ b/.claude/commands/bmad-brainstorming.md @@ -0,0 +1,7 @@ +--- +name: 'brainstorming' +description: 'Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods' +disable-model-invocation: true +--- + +IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{project-root}/_bmad/core/workflows/brainstorming/workflow.md, READ its entire contents and follow its directions exactly! diff --git a/.claude/commands/bmad-editorial-review-prose.md b/.claude/commands/bmad-editorial-review-prose.md new file mode 100644 index 0000000..3b6c00c --- /dev/null +++ b/.claude/commands/bmad-editorial-review-prose.md @@ -0,0 +1,10 @@ +--- +name: 'editorial-review-prose' +description: 'Clinical copy-editor that reviews text for communication issues' +--- + +# editorial-review-prose + +Read the entire task file at: {project-root}/_bmad/core/tasks/editorial-review-prose.xml + +Follow all instructions in the task file exactly as written. 
diff --git a/.claude/commands/bmad-editorial-review-structure.md b/.claude/commands/bmad-editorial-review-structure.md new file mode 100644 index 0000000..f1128a5 --- /dev/null +++ b/.claude/commands/bmad-editorial-review-structure.md @@ -0,0 +1,10 @@ +--- +name: 'editorial-review-structure' +description: 'Structural editor that proposes cuts, reorganization, and simplification while preserving comprehension' +--- + +# editorial-review-structure + +Read the entire task file at: {project-root}/_bmad/core/tasks/editorial-review-structure.xml + +Follow all instructions in the task file exactly as written. diff --git a/.claude/commands/bmad-help.md b/.claude/commands/bmad-help.md new file mode 100644 index 0000000..70af2fc --- /dev/null +++ b/.claude/commands/bmad-help.md @@ -0,0 +1,10 @@ +--- +name: 'help' +description: 'Get unstuck by showing what workflow steps come next or answering questions about what to do' +--- + +# help + +Read the entire task file at: {project-root}/_bmad/core/tasks/help.md + +Follow all instructions in the task file exactly as written. diff --git a/.claude/commands/bmad-index-docs.md b/.claude/commands/bmad-index-docs.md new file mode 100644 index 0000000..e0d5b82 --- /dev/null +++ b/.claude/commands/bmad-index-docs.md @@ -0,0 +1,10 @@ +--- +name: 'index-docs' +description: 'Generates or updates an index.md of all documents in the specified directory' +--- + +# index-docs + +Read the entire task file at: {project-root}/_bmad/core/tasks/index-docs.xml + +Follow all instructions in the task file exactly as written. 
diff --git a/.claude/commands/bmad-party-mode.md b/.claude/commands/bmad-party-mode.md new file mode 100644 index 0000000..f04b814 --- /dev/null +++ b/.claude/commands/bmad-party-mode.md @@ -0,0 +1,7 @@ +--- +name: 'party-mode' +description: 'Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations' +disable-model-invocation: true +--- + +IT IS CRITICAL THAT YOU FOLLOW THIS COMMAND: LOAD the FULL @{project-root}/_bmad/core/workflows/party-mode/workflow.md, READ its entire contents and follow its directions exactly! diff --git a/.claude/commands/bmad-review-adversarial-general.md b/.claude/commands/bmad-review-adversarial-general.md new file mode 100644 index 0000000..17e1a33 --- /dev/null +++ b/.claude/commands/bmad-review-adversarial-general.md @@ -0,0 +1,10 @@ +--- +name: 'review-adversarial-general' +description: 'Cynically review content and produce findings' +--- + +# review-adversarial-general + +Read the entire task file at: {project-root}/_bmad/core/tasks/review-adversarial-general.xml + +Follow all instructions in the task file exactly as written. diff --git a/.claude/commands/bmad-shard-doc.md b/.claude/commands/bmad-shard-doc.md new file mode 100644 index 0000000..0a38656 --- /dev/null +++ b/.claude/commands/bmad-shard-doc.md @@ -0,0 +1,10 @@ +--- +name: 'shard-doc' +description: 'Splits large markdown documents into smaller, organized files based on level 2 (default) sections' +--- + +# shard-doc + +Read the entire task file at: {project-root}/_bmad/core/tasks/shard-doc.xml + +Follow all instructions in the task file exactly as written. 
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8ffe590 --- /dev/null +++ b/.gitignore @@ -0,0 +1,55 @@ +# ── Local data (not tracked) ────────────────────────────────────────────────── +data/ +*.db +*.db-shm +*.db-wal + +# ── Python ──────────────────────────────────────────────────────────────────── +__pycache__/ +*.py[cod] +*.pyo +*.pyd +*.so +*.egg +*.egg-info/ +dist/ +build/ +.eggs/ +.mypy_cache/ +.ruff_cache/ +.pytest_cache/ +.coverage +htmlcov/ +*.log + +# Virtual environments +.venv/ +venv/ +env/ +ENV/ + +# ── Node / npm ──────────────────────────────────────────────────────────────── +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +package-lock.json + +# ── Frontend build output ───────────────────────────────────────────────────── +frontend/dist/ +frontend/build/ + +# ── Environment & secrets ───────────────────────────────────────────────────── +.env +.env.* +!.env.example + +# ── OS & editor artifacts ───────────────────────────────────────────────────── +.DS_Store +Thumbs.db +desktop.ini +.idea/ +.vscode/ +*.swp +*.swo +*~ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7ccac51 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.12-slim + +WORKDIR /app + +# curl for healthcheck; cron for scheduled weekly download +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + cron \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +RUN mkdir -p /data + +EXPOSE 8000 diff --git a/README.md b/README.md new file mode 100644 index 0000000..31aea65 --- /dev/null +++ b/README.md @@ -0,0 +1,147 @@ +# CFTC COT Explorer + +A tool for ingesting, storing, and querying CFTC (Commodity Futures Trading Commission) Commitments of Traders (COT) report data. Provides a REST API backed by a SQLite database. 
+ +## Overview + +The CFTC publishes weekly COT reports showing the positioning of commercial, non-commercial, and non-reportable traders across futures markets. This project: + +- Parses the weekly HTML reports and historical ZIP archives from the CFTC website +- Stores the data in a SQLite database +- Exposes the data via a FastAPI REST API with analytics endpoints + +## Project Structure + +``` +. +├── app/ +│ ├── db.py # Database connection and initialization +│ ├── ingestion/ +│ │ ├── cli.py # Command-line ingestion tool +│ │ ├── importer.py # Import logic (HTML, ZIP, download) +│ │ └── parser.py # HTML/ZIP parser +│ └── api/ +│ ├── main.py # FastAPI application entry point +│ ├── models.py # Pydantic response models +│ └── routes/ +│ ├── commodities.py # /api/commodities, /api/exchanges +│ ├── positions.py # /api/positions/{code}/... +│ ├── analytics.py # /api/analytics/... +│ └── reports.py # /api/reports/... +├── data/ # SQLite database and downloaded HTML files +├── schema.sql # Database schema +└── cftc_downloader.py # Standalone downloader script +``` + +## Setup + +**Requirements:** Python 3.10+ + +```bash +pip install fastapi uvicorn requests beautifulsoup4 +``` + +**Initialize the database:** + +```bash +python -m app.ingestion.cli init-db +``` + +## Data Ingestion + +### Import local HTML files + +If you have weekly HTML files saved in `./data/`: + +```bash +python -m app.ingestion.cli import-local-html --data-dir ./data +``` + +### Download and import the latest weekly report + +```bash +python -m app.ingestion.cli download-and-import +``` + +### Import the full historical archive (1995–present) + +```bash +python -m app.ingestion.cli import-history --start-year 1995 --end-year 2026 +``` + +### Import a specific file + +```bash +python -m app.ingestion.cli import-html data/2026-03-10_deacbtlof.htm +python -m app.ingestion.cli import-zip deahistfo2024.zip +``` + +### Check database status + +```bash +python -m app.ingestion.cli status +``` + 
+Ingestion is idempotent — re-running any import command will skip already-imported sources. + +## Running the API + +```bash +uvicorn app.api.main:app --reload +``` + +The API will be available at `http://localhost:8000`. Interactive docs are at `http://localhost:8000/docs`. + +### Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `DB_PATH` | `app/data/cot.db` | Path to the SQLite database file | + +## API Endpoints + +### Commodities + +| Method | Path | Description | +|--------|------|-------------| +| `GET` | `/api/exchanges` | List all exchanges with commodity counts | +| `GET` | `/api/commodities` | List all commodities (filter by `?exchange=CME`) | +| `GET` | `/api/commodities/{cftc_code}` | Get metadata for a single commodity | + +### Positions + +| Method | Path | Description | +|--------|------|-------------| +| `GET` | `/api/positions/{cftc_code}/latest` | Latest report with all row types and concentration data | +| `GET` | `/api/positions/{cftc_code}/history` | Time series of positions (supports `from_date`, `to_date`, `row_type`) | +| `GET` | `/api/positions/{cftc_code}/extremes` | All-time min/max for open interest and net positions | +| `GET` | `/api/positions/compare` | Compare a metric across multiple commodities (comma-separated `codes`) | + +### Analytics + +| Method | Path | Description | +|--------|------|-------------| +| `GET` | `/api/analytics/screener` | Rank markets by non-commercial net position percentile | +| `GET` | `/api/analytics/{cftc_code}/net-position-percentile` | Percentile rank and z-score for current net position | +| `GET` | `/api/analytics/{cftc_code}/concentration` | Largest-trader concentration data over time | + +#### Screener parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `exchange` | — | Filter by exchange abbreviation | +| `lookback_weeks` | `156` | Historical window for percentile calculation (4–1560) | +| `top_n` | 
`50` | Number of results to return | +| `direction` | — | Filter to `long` (≥50th pct) or `short` (<50th pct) | + +## Database Schema + +The SQLite database contains five tables: + +- **`commodities`** — one row per unique market (CFTC code, name, exchange) +- **`reports`** — one row per (commodity, report date) +- **`positions`** — position data per report, split into `All`, `Old`, and `Other` row types; includes open interest, long/short counts, week-over-week changes, percent of open interest, and trader counts +- **`concentration`** — largest-trader concentration ratios (top 4 and top 8 traders, gross and net) +- **`import_log`** — tracks which source files have been processed + +A convenience view `v_net_positions` joins all tables and pre-computes net positions (long minus short) for each trader category. diff --git a/_bmad-output/brainstorming/brainstorming-session-2026-02-22.md b/_bmad-output/brainstorming/brainstorming-session-2026-02-22.md new file mode 100644 index 0000000..13ef7e6 --- /dev/null +++ b/_bmad-output/brainstorming/brainstorming-session-2026-02-22.md @@ -0,0 +1,47 @@ +--- +stepsCompleted: [1, 2] +inputDocuments: ['data/2026-02-17_deacbtlof.htm'] +session_topic: 'COT-based trading strategies for beginners' +session_goals: 'Practical, implementable trading approaches using CFTC positioning data' +selected_approach: 'ai-recommended' +techniques_used: ['first_principles_thinking', 'morphological_analysis', 'six_thinking_hats'] +ideas_generated: [] +context_file: '' +--- + +# Brainstorming Session Results + +**Facilitator:** Greg +**Date:** 2026-02-22 + +## Session Overview + +**Topic:** COT-based trading strategies for beginners +**Goals:** Practical, implementable trading approaches using CFTC positioning data + +### Context Data + +CFTC Commitments of Traders Report (February 17, 2026) covering: +- Agricultural commodities: Wheat (SRW/HRW), Corn, Soybeans, Soybean Oil, Soybean Meal, Rough Rice +- Treasury securities: UST Bonds, 2Y/5Y/10Y 
Notes, Ultra bonds +- Other: Fed Funds, DJIA, DJ Real Estate, Bloomberg Commodity Index, ERIS SOFR Swaps + +### Session Setup + +User selected AI-Recommended approach for systematic strategy development. + +## Technique Selection + +**Approach:** AI-Recommended Techniques +**Analysis Context:** Trading strategy development with focus on practical implementation + +**Recommended Techniques:** + +1. **First Principles Thinking:** Strip away assumptions about COT data to identify fundamental truths +2. **Morphological Analysis:** Systematically explore all variable combinations for strategy generation +3. **Six Thinking Hats:** Multi-perspective evaluation of top strategy candidates + +--- + +## Phase 1: First Principles Thinking + diff --git a/_bmad/_config/agent-manifest.csv b/_bmad/_config/agent-manifest.csv new file mode 100644 index 0000000..4624966 --- /dev/null +++ b/_bmad/_config/agent-manifest.csv @@ -0,0 +1,2 @@ +name,displayName,title,icon,capabilities,role,identity,communicationStyle,principles,module,path +"bmad-master","BMad Master","BMad Master Executor, Knowledge Custodian, and Workflow Orchestrator","🧙","runtime resource management, workflow orchestration, task execution, knowledge custodian","Master Task Executor + BMad Expert + Guiding Facilitator Orchestrator","Master-level expert in the BMAD Core Platform and all loaded modules with comprehensive knowledge of all resources, tasks, and workflows. Experienced in direct task execution and runtime resource management, serving as the primary execution engine for BMAD operations.","Direct and comprehensive, refers to himself in the 3rd person. 
Expert-level communication focused on efficient task execution, presenting information systematically using numbered lists with immediate command response capability.","- Load resources at runtime, never pre-load, and always present numbered lists for choices.","core","_bmad/core/agents/bmad-master.md" diff --git a/_bmad/_config/agents/core-bmad-master.customize.yaml b/_bmad/_config/agents/core-bmad-master.customize.yaml new file mode 100644 index 0000000..b8cc648 --- /dev/null +++ b/_bmad/_config/agents/core-bmad-master.customize.yaml @@ -0,0 +1,41 @@ +# Agent Customization +# Customize any section below - all are optional + +# Override agent name +agent: + metadata: + name: "" + +# Replace entire persona (not merged) +persona: + role: "" + identity: "" + communication_style: "" + principles: [] + +# Add custom critical actions (appended after standard config loading) +critical_actions: [] + +# Add persistent memories for the agent +memories: [] +# Example: +# memories: +# - "User prefers detailed technical explanations" +# - "Current project uses React and TypeScript" + +# Add custom menu items (appended to base menu) +# Don't include * prefix or help/exit - auto-injected +menu: [] +# Example: +# menu: +# - trigger: my-workflow +# workflow: "{project-root}/custom/my.yaml" +# description: My custom workflow + +# Add custom prompts (for action="#id" handlers) +prompts: [] +# Example: +# prompts: +# - id: my-prompt +# content: | +# Prompt instructions here diff --git a/_bmad/_config/bmad-help.csv b/_bmad/_config/bmad-help.csv new file mode 100644 index 0000000..0bedf91 --- /dev/null +++ b/_bmad/_config/bmad-help.csv @@ -0,0 +1,9 @@ +module,phase,name,code,sequence,workflow-file,command,required,agent-name,agent-command,agent-display-name,agent-title,options,description,output-location,outputs +core,anytime,Brainstorming,BSP,,_bmad/core/workflows/brainstorming/workflow.md,bmad-brainstorming,false,analyst,,,,,Generate diverse ideas through interactive techniques. 
Use early in ideation phase or when stuck generating ideas.,{output_folder}/brainstorming/brainstorming-session-{{date}}.md, +core,anytime,Party Mode,PM,,_bmad/core/workflows/party-mode/workflow.md,bmad-party-mode,false,party-mode facilitator,,,,,Orchestrate multi-agent discussions. Use when you need multiple agent perspectives or want agents to collaborate.,, +core,anytime,bmad-help,BH,,_bmad/core/tasks/help.md,bmad-help,false,,,,,,Get unstuck by showing what workflow steps come next or answering BMad Method questions.,, +core,anytime,Index Docs,ID,,_bmad/core/tasks/index-docs.xml,bmad-index-docs,false,,,,,,Create lightweight index for quick LLM scanning. Use when LLM needs to understand available docs without loading everything.,, +core,anytime,Shard Document,SD,,_bmad/core/tasks/shard-doc.xml,bmad-shard-doc,false,,,,,,Split large documents into smaller files by sections. Use when doc becomes too large (>500 lines) to manage effectively.,, +core,anytime,Editorial Review - Prose,EP,,_bmad/core/tasks/editorial-review-prose.xml,bmad-editorial-review-prose,false,,,,,,"Review prose for clarity, tone, and communication issues. Use after drafting to polish written content.",report located with target document,three-column markdown table with suggested fixes +core,anytime,Editorial Review - Structure,ES,,_bmad/core/tasks/editorial-review-structure.xml,bmad-editorial-review-structure,false,,,,,,"Propose cuts, reorganization, and simplification while preserving comprehension. Use when doc produced from multiple subprocesses or needs structural improvement.",report located with target document, +core,anytime,Adversarial Review (General),AR,,_bmad/core/tasks/review-adversarial-general.xml,bmad-review-adversarial-general,false,,,,,,"Review content critically to find issues and weaknesses. Use for quality assurance or before finalizing deliverables. 
Code Review in other modules run this automatically, but its useful also for document reviews",, \ No newline at end of file diff --git a/_bmad/_config/files-manifest.csv b/_bmad/_config/files-manifest.csv new file mode 100644 index 0000000..91f1518 --- /dev/null +++ b/_bmad/_config/files-manifest.csv @@ -0,0 +1,31 @@ +type,name,module,path,hash +"csv","agent-manifest","_config","_config/agent-manifest.csv","d8455c8171d4d0c21bc000efe6dc27a1edcdb19a1c073105464d303cb45ee9b6" +"csv","task-manifest","_config","_config/task-manifest.csv","bac7378952f0c79a48469b582997507b08cf08583b31b8aa6083791db959e0f0" +"csv","workflow-manifest","_config","_config/workflow-manifest.csv","dfbd7627a6894e106643522f4739006a4fa7e079b69de56066f81a2b18597d2a" +"yaml","manifest","_config","_config/manifest.yaml","cf5503079a0449e546687c3dae6a8606b479c378e3794437384a0da6f4eb3bfa" +"csv","brain-methods","core","core/workflows/brainstorming/brain-methods.csv","0ab5878b1dbc9e3fa98cb72abfc3920a586b9e2b42609211bb0516eefd542039" +"csv","methods","core","core/workflows/advanced-elicitation/methods.csv","e08b2e22fec700274982e37be608d6c3d1d4d0c04fa0bae05aa9dba2454e6141" +"csv","module-help","core","core/module-help.csv","4227d475748e8067aeae3e1a67d7b6235c109da13b2ef9131db930083dcb348d" +"md","help","core","core/tasks/help.md","950439aaff47aa25f94ede360ce8f8a47bf29c52b7f19c76a45960e8687fe726" +"md","step-01-agent-loading","core","core/workflows/party-mode/steps/step-01-agent-loading.md","04ab6b6247564f7edcd5c503f5ca7d27ae688b09bbe2e24345550963a016e9f9" +"md","step-01-session-setup","core","core/workflows/brainstorming/steps/step-01-session-setup.md","bc09cc22a0465b316ff3c13903b753768fa31d83abd3f9fc328631db63dc0cf8" +"md","step-01b-continue","core","core/workflows/brainstorming/steps/step-01b-continue.md","d76a406e0ff0a0e58006ec671b56f19a059e98cfebba4c0724ae6ccdd9303e7f" 
+"md","step-02-discussion-orchestration","core","core/workflows/party-mode/steps/step-02-discussion-orchestration.md","a8a79890bd03237e20f1293045ecf06f9a62bc590f5c2d4f88e250cee40abb0b" +"md","step-02a-user-selected","core","core/workflows/brainstorming/steps/step-02a-user-selected.md","558b162466745b92687a5d6e218f243a98436dd177b2d5544846c5ff4497cc94" +"md","step-02b-ai-recommended","core","core/workflows/brainstorming/steps/step-02b-ai-recommended.md","99aa935279889f278dcb2a61ba191600a18e9db356dd8ce62f0048d3c37c9531" +"md","step-02c-random-selection","core","core/workflows/brainstorming/steps/step-02c-random-selection.md","f188c260c321c7f026051fefcd267a26ee18ce2a07f64bab7f453c0c3e483316" +"md","step-02d-progressive-flow","core","core/workflows/brainstorming/steps/step-02d-progressive-flow.md","a28c7a3edf34ceb0eea203bf7dc80f39ca04974f6d1ec243f0a088281b2e55de" +"md","step-03-graceful-exit","core","core/workflows/party-mode/steps/step-03-graceful-exit.md","bdecc33004d73238ca05d8fc9d6b86cba89833630956f53ecd82ec3715c5f0da" +"md","step-03-technique-execution","core","core/workflows/brainstorming/steps/step-03-technique-execution.md","9e6abceec5f774c57cd5205e30a1f24a95441131dbffcae9c3dce72111f95ceb" +"md","step-04-idea-organization","core","core/workflows/brainstorming/steps/step-04-idea-organization.md","5224490c33bf4b23b2897f3bcf12abe0b1ced306541dd60c21df0ce9fc65d1ac" +"md","template","core","core/workflows/brainstorming/template.md","5c99d76963eb5fc21db96c5a68f39711dca7c6ed30e4f7d22aedee9e8bb964f9" +"md","workflow","core","core/workflows/brainstorming/workflow.md","7d7f957ccd176faed2551e3089abfa49032963e980b5643d9384690af3d61203" +"md","workflow","core","core/workflows/party-mode/workflow.md","f8537e152df8db331d86e2a37e5ced55bccff3a71e290f82eb754d28c0c9ec08" +"xml","editorial-review-prose","core","core/tasks/editorial-review-prose.xml","49f462ddc5f20a6e2abf14e4b8f3a25c70885c6a6d776ef4674739dd7880988a" 
+"xml","editorial-review-structure","core","core/tasks/editorial-review-structure.xml","307edce94877dacdaafb10f7ea39115944c7d19e57228a7859abf2fee8b1a177" +"xml","index-docs","core","core/tasks/index-docs.xml","90076db678b1d65b4dd8b166731584fafc68e660e5015f309a1c78aae6e25a28" +"xml","review-adversarial-general","core","core/tasks/review-adversarial-general.xml","347436fde09411caaab10ff97e4cbd2bfef31dbe9f8db9e0eb49c3ed361ede7b" +"xml","shard-doc","core","core/tasks/shard-doc.xml","947f2c7d4f6bb269ad0bcc1a03227d0d6da642d9df47894b8ba215c5149aed3d" +"xml","workflow","core","core/tasks/workflow.xml","17bca7fa63bae20aaac4768d81463a7a2de7f80b60d4d9a8f36b70821ba86cfd" +"xml","workflow","core","core/workflows/advanced-elicitation/workflow.xml","ead4dc1e50c95d8966b3676842a57fca97c70d83f1f3b9e9c2d746821e6868b4" +"yaml","config","core","core/config.yaml","c0f42545ac5370c9fc9c226c7b30324db6b882c0edd35ee8a6d11877c5af0ca8" diff --git a/_bmad/_config/ides/claude-code.yaml b/_bmad/_config/ides/claude-code.yaml new file mode 100644 index 0000000..8c7aff8 --- /dev/null +++ b/_bmad/_config/ides/claude-code.yaml @@ -0,0 +1,5 @@ +ide: claude-code +configured_date: 2026-02-22T11:15:16.711Z +last_updated: 2026-02-22T11:15:16.711Z +configuration: + _noConfigNeeded: true diff --git a/_bmad/_config/manifest.yaml b/_bmad/_config/manifest.yaml new file mode 100644 index 0000000..0d54afe --- /dev/null +++ b/_bmad/_config/manifest.yaml @@ -0,0 +1,14 @@ +installation: + version: 6.0.1 + installDate: 2026-02-22T11:15:16.661Z + lastUpdated: 2026-02-22T11:15:16.661Z +modules: + - name: core + version: 6.0.1 + installDate: 2026-02-22T11:15:16.661Z + lastUpdated: 2026-02-22T11:15:16.661Z + source: built-in + npmPackage: null + repoUrl: null +ides: + - claude-code diff --git a/_bmad/_config/task-manifest.csv b/_bmad/_config/task-manifest.csv new file mode 100644 index 0000000..dc8dc24 --- /dev/null +++ b/_bmad/_config/task-manifest.csv @@ -0,0 +1,7 @@ +name,displayName,description,module,path,standalone 
+"editorial-review-prose","Editorial Review - Prose","Clinical copy-editor that reviews text for communication issues","core","_bmad/core/tasks/editorial-review-prose.xml","true" +"editorial-review-structure","Editorial Review - Structure","Structural editor that proposes cuts, reorganization, and simplification while preserving comprehension","core","_bmad/core/tasks/editorial-review-structure.xml","true" +"help","help","Get unstuck by showing what workflow steps come next or answering questions about what to do","core","_bmad/core/tasks/help.md","true" +"index-docs","Index Docs","Generates or updates an index.md of all documents in the specified directory","core","_bmad/core/tasks/index-docs.xml","true" +"review-adversarial-general","Adversarial Review (General)","Cynically review content and produce findings","core","_bmad/core/tasks/review-adversarial-general.xml","true" +"shard-doc","Shard Document","Splits large markdown documents into smaller, organized files based on level 2 (default) sections","core","_bmad/core/tasks/shard-doc.xml","true" diff --git a/_bmad/_config/tool-manifest.csv b/_bmad/_config/tool-manifest.csv new file mode 100644 index 0000000..8fbcabb --- /dev/null +++ b/_bmad/_config/tool-manifest.csv @@ -0,0 +1 @@ +name,displayName,description,module,path,standalone diff --git a/_bmad/_config/workflow-manifest.csv b/_bmad/_config/workflow-manifest.csv new file mode 100644 index 0000000..4b30ccf --- /dev/null +++ b/_bmad/_config/workflow-manifest.csv @@ -0,0 +1,3 @@ +name,description,module,path +"brainstorming","Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods","core","_bmad/core/workflows/brainstorming/workflow.md" +"party-mode","Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations","core","_bmad/core/workflows/party-mode/workflow.md" diff --git a/_bmad/core/agents/bmad-master.md b/_bmad/core/agents/bmad-master.md new file mode 100644 
index 0000000..5f204b7 --- /dev/null +++ b/_bmad/core/agents/bmad-master.md @@ -0,0 +1,56 @@ +--- +name: "bmad master" +description: "BMad Master Executor, Knowledge Custodian, and Workflow Orchestrator" +--- + +You must fully embody this agent's persona and follow all activation instructions exactly as specified. NEVER break character until given an exit command. + +```xml + + + Load persona from this current agent file (already in context) + 🚨 IMMEDIATE ACTION REQUIRED - BEFORE ANY OUTPUT: + - Load and read {project-root}/_bmad/core/config.yaml NOW + - Store ALL fields as session variables: {user_name}, {communication_language}, {output_folder} + - VERIFY: If config not loaded, STOP and report error to user + - DO NOT PROCEED to step 3 until config is successfully loaded and variables stored + + Remember: user's name is {user_name} + Always greet the user and let them know they can use `/bmad-help` at any time to get advice on what to do next, and they can combine that with what they need help with `/bmad-help where should I start with an idea I have that does XYZ` + Show greeting using {user_name} from config, communicate in {communication_language}, then display numbered list of ALL menu items from menu section + Let {user_name} know they can type command `/bmad-help` at any time to get advice on what to do next, and that they can combine that with what they need help with `/bmad-help where should I start with an idea I have that does XYZ` + STOP and WAIT for user input - do NOT execute menu items automatically - accept number or cmd trigger or fuzzy command match + On user input: Number → process menu item[n] | Text → case-insensitive substring match | Multiple matches → ask user to clarify | No match → show "Not recognized" + When processing a menu item: Check menu-handlers section below - extract any attributes from the selected menu item (workflow, exec, tmpl, data, action, validate-workflow) and follow the corresponding handler instructions + + + + + When 
menu item has: action="#id" → Find prompt with id="id" in current agent XML, follow its content + When menu item has: action="text" → Follow the text directly as an inline instruction + + + + + + ALWAYS communicate in {communication_language} UNLESS contradicted by communication_style. + Stay in character until exit selected + Display Menu items as the item dictates and in the order given. + Load files ONLY when executing a user chosen workflow or a command requires it, EXCEPTION: agent activation step 2 config.yaml + + + Master Task Executor + BMad Expert + Guiding Facilitator Orchestrator + Master-level expert in the BMAD Core Platform and all loaded modules with comprehensive knowledge of all resources, tasks, and workflows. Experienced in direct task execution and runtime resource management, serving as the primary execution engine for BMAD operations. + Direct and comprehensive, refers to himself in the 3rd person. Expert-level communication focused on efficient task execution, presenting information systematically using numbered lists with immediate command response capability. + - Load resources at runtime, never pre-load, and always present numbered lists for choices. 
+ + + [MH] Redisplay Menu Help + [CH] Chat with the Agent about anything + [LT] List Available Tasks + [LW] List Workflows + [PM] Start Party Mode + [DA] Dismiss Agent + + +``` diff --git a/_bmad/core/config.yaml b/_bmad/core/config.yaml new file mode 100644 index 0000000..6b76040 --- /dev/null +++ b/_bmad/core/config.yaml @@ -0,0 +1,9 @@ +# CORE Module Configuration +# Generated by BMAD installer +# Version: 6.0.1 +# Date: 2026-02-22T11:15:16.651Z + +user_name: Greg +communication_language: English +document_output_language: English +output_folder: "{project-root}/_bmad-output" diff --git a/_bmad/core/module-help.csv b/_bmad/core/module-help.csv new file mode 100644 index 0000000..1fdf064 --- /dev/null +++ b/_bmad/core/module-help.csv @@ -0,0 +1,9 @@ +module,phase,name,code,sequence,workflow-file,command,required,agent,options,description,output-location,outputs +core,anytime,Brainstorming,BSP,,_bmad/core/workflows/brainstorming/workflow.md,bmad-brainstorming,false,analyst,,"Generate diverse ideas through interactive techniques. Use early in ideation phase or when stuck generating ideas.",{output_folder}/brainstorming/brainstorming-session-{{date}}.md,, +core,anytime,Party Mode,PM,,_bmad/core/workflows/party-mode/workflow.md,bmad-party-mode,false,party-mode facilitator,,"Orchestrate multi-agent discussions. Use when you need multiple agent perspectives or want agents to collaborate.",, +core,anytime,bmad-help,BH,,_bmad/core/tasks/help.md,bmad-help,false,,,"Get unstuck by showing what workflow steps come next or answering BMad Method questions.",, +core,anytime,Index Docs,ID,,_bmad/core/tasks/index-docs.xml,bmad-index-docs,false,,,"Create lightweight index for quick LLM scanning. Use when LLM needs to understand available docs without loading everything.",, +core,anytime,Shard Document,SD,,_bmad/core/tasks/shard-doc.xml,bmad-shard-doc,false,,,"Split large documents into smaller files by sections. 
Use when doc becomes too large (>500 lines) to manage effectively.",, +core,anytime,Editorial Review - Prose,EP,,_bmad/core/tasks/editorial-review-prose.xml,bmad-editorial-review-prose,false,,,"Review prose for clarity, tone, and communication issues. Use after drafting to polish written content.",report located with target document,"three-column markdown table with suggested fixes", +core,anytime,Editorial Review - Structure,ES,,_bmad/core/tasks/editorial-review-structure.xml,bmad-editorial-review-structure,false,,,"Propose cuts, reorganization, and simplification while preserving comprehension. Use when doc produced from multiple subprocesses or needs structural improvement.",report located with target document, +core,anytime,Adversarial Review (General),AR,,_bmad/core/tasks/review-adversarial-general.xml,bmad-review-adversarial-general,false,,,"Review content critically to find issues and weaknesses. Use for quality assurance or before finalizing deliverables. Code Review in other modules run this automatically, but its useful also for document reviews",, diff --git a/_bmad/core/tasks/editorial-review-prose.xml b/_bmad/core/tasks/editorial-review-prose.xml new file mode 100644 index 0000000..deb5357 --- /dev/null +++ b/_bmad/core/tasks/editorial-review-prose.xml @@ -0,0 +1,102 @@ + + + Review text for communication issues that impede comprehension and output suggested fixes in a three-column table + + + + + + + + + MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER + DO NOT skip steps or change the sequence + HALT immediately when halt-conditions are met + Each action xml tag within step xml tag is a REQUIRED action to complete that step + + You are a clinical copy-editor: precise, professional, neither warm nor cynical + Apply Microsoft Writing Style Guide principles as your baseline + Focus on communication issues that impede comprehension - not style preferences + NEVER rewrite for preference - only fix genuine issues + + CONTENT IS SACROSANCT: 
Never challenge ideas—only clarify how they're expressed. + + + Minimal intervention: Apply the smallest fix that achieves clarity + Preserve structure: Fix prose within existing structure, never restructure + Skip code/markup: Detect and skip code blocks, frontmatter, structural markup + When uncertain: Flag with a query rather than suggesting a definitive change + Deduplicate: Same issue in multiple places = one entry with locations listed + No conflicts: Merge overlapping fixes into single entries + Respect author voice: Preserve intentional stylistic choices + + STYLE GUIDE OVERRIDE: If a style_guide input is provided, + it overrides ALL generic principles in this task (including the Microsoft + Writing Style Guide baseline and reader_type-specific priorities). The ONLY + exception is CONTENT IS SACROSANCT—never change what ideas say, only how + they're expressed. When style guide conflicts with this task, style guide wins. + + + + + Check if content is empty or contains fewer than 3 words + HALT with error: "Content too short for editorial review (minimum 3 words required)" + Validate reader_type is "humans" or "llm" (or not provided, defaulting to "humans") + HALT with error: "Invalid reader_type. 
Must be 'humans' or 'llm'" + Identify content type (markdown, plain text, XML with text) + Note any code blocks, frontmatter, or structural markup to skip + + + + Analyze the style, tone, and voice of the input text + Note any intentional stylistic choices to preserve (informal tone, technical jargon, rhetorical patterns) + Calibrate review approach based on reader_type parameter + Prioritize: unambiguous references, consistent terminology, explicit structure, no hedging + Prioritize: clarity, flow, readability, natural progression + + + + Consult style_guide now and note its key requirements—these override default principles for this + review + Review all prose sections (skip code blocks, frontmatter, structural markup) + Identify communication issues that impede comprehension + For each issue, determine the minimal fix that achieves clarity + Deduplicate: If same issue appears multiple times, create one entry listing all locations + Merge overlapping issues into single entries (no conflicting suggestions) + For uncertain fixes, phrase as query: "Consider: [suggestion]?" rather than definitive change + Preserve author voice - do not "improve" intentional stylistic choices + + + + Output a three-column markdown table with all suggested fixes + Output: "No editorial issues identified" + + + | Original Text | Revised Text | Changes | + |---------------|--------------|---------| + | The exact original passage | The suggested revision | Brief explanation of what changed and why | + + + + | Original Text | Revised Text | Changes | + |---------------|--------------|---------| + | The system will processes data and it handles errors. | The system processes data and handles errors. 
| Fixed subject-verb + agreement ("will processes" to "processes"); removed redundant "it" | + | Users can chose from options (lines 12, 45, 78) | Users can choose from options | Fixed spelling: "chose" to "choose" (appears in + 3 locations) | + + + + + + HALT with error if content is empty or fewer than 3 words + HALT with error if reader_type is not "humans" or "llm" + If no issues found after thorough review, output "No editorial issues identified" (this is valid completion, not an error) + + + \ No newline at end of file diff --git a/_bmad/core/tasks/editorial-review-structure.xml b/_bmad/core/tasks/editorial-review-structure.xml new file mode 100644 index 0000000..426dc3c --- /dev/null +++ b/_bmad/core/tasks/editorial-review-structure.xml @@ -0,0 +1,209 @@ + + + + Review document structure and propose substantive changes + to improve clarity and flow-run this BEFORE copy editing + + + + + + + + + + MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER + DO NOT skip steps or change the sequence + HALT immediately when halt-conditions are met + Each action xml tag within step xml tag is a REQUIRED action to complete that step + You are a structural editor focused on HIGH-VALUE DENSITY + Brevity IS clarity: Concise writing respects limited attention spans and enables effective scanning + Every section must justify its existence-cut anything that delays understanding + True redundancy is failure + + Comprehension through calibration: Optimize for the minimum words needed to maintain understanding + Front-load value: Critical information comes first; nice-to-know comes last (or goes) + One source of truth: If information appears identically twice, consolidate + Scope discipline: Content that belongs in a different document should be cut or linked + Propose, don't execute: Output recommendations-user decides what to accept + CONTENT IS SACROSANCT: Never challenge ideas—only optimize how they're organized. 
+ + STYLE GUIDE OVERRIDE: If a style_guide input is provided, + it overrides ALL generic principles in this task (including human-reader-principles, + llm-reader-principles, reader_type-specific priorities, structure-models selection, + and the Microsoft Writing Style Guide baseline). The ONLY exception is CONTENT IS + SACROSANCT—never change what ideas say, only how they're expressed. When style + guide conflicts with this task, style guide wins. + + These elements serve human comprehension and engagement-preserve unless clearly wasteful: + Visual aids: Diagrams, images, and flowcharts anchor understanding + Expectation-setting: "What You'll Learn" helps readers confirm they're in the right place + Reader's Journey: Organize content biologically (linear progression), not logically (database) + Mental models: Overview before details prevents cognitive overload + Warmth: Encouraging tone reduces anxiety for new users + Whitespace: Admonitions and callouts provide visual breathing room + Summaries: Recaps help retention; they're reinforcement, not redundancy + Examples: Concrete illustrations make abstract concepts accessible + Engagement: "Flow" techniques (transitions, variety) are functional, not "fluff"-they maintain attention + + + When reader_type='llm', optimize for PRECISION and UNAMBIGUITY: + Dependency-first: Define concepts before usage to minimize hallucination risk + Cut emotional language, encouragement, and orientation sections + + IF concept is well-known from training (e.g., "conventional + commits", "REST APIs"): Reference the standard-don't re-teach it + ELSE: Be explicit-don't assume the LLM will infer correctly + + Use consistent terminology-same word for same concept throughout + Eliminate hedging ("might", "could", "generally")-use direct statements + Prefer structured formats (tables, lists, YAML) over prose + Reference known standards ("conventional commits", "Google style guide") to leverage training + STILL PROVIDE EXAMPLES even for known 
standards-grounds the LLM in your specific expectation + Unambiguous references-no unclear antecedents ("it", "this", "the above") + Note: LLM documents may be LONGER than human docs in some areas + (more explicit) while shorter in others (no warmth) + + + + Prerequisites: Setup/Context MUST precede action + Sequence: Steps must follow strict chronological or logical dependency order + Goal-oriented: clear 'Definition of Done' at the end + + + Random Access: No narrative flow required; user jumps to specific item + MECE: Topics are Mutually Exclusive and Collectively Exhaustive + Consistent Schema: Every item follows identical structure (e.g., Signature to Params to Returns) + + + Abstract to Concrete: Definition to Context to Implementation/Example + Scaffolding: Complex ideas built on established foundations + + + Meta-first: Inputs, usage constraints, and context defined before instructions + Separation of Concerns: Instructions (logic) separate from Data (content) + Step-by-step: Execution flow must be explicit and ordered + + + Top-down: Conclusion/Status/Recommendation starts the document + Grouping: Supporting context grouped logically below the headline + Ordering: Most critical information first + MECE: Arguments/Groups are Mutually Exclusive and Collectively Exhaustive + Evidence: Data supports arguments, never leads + + + + + + Check if content is empty or contains fewer than 3 words + HALT with error: "Content + too short for substantive review (minimum 3 words required)" + Validate reader_type is "humans" or "llm" (or not provided, defaulting to "humans") + HALT with error: "Invalid reader_type. Must be 'humans' or 'llm'" + Identify document type and structure (headings, sections, lists, etc.) 
+ Note the current word count and section count + + + If purpose was provided, use it; otherwise infer from content + If target_audience was provided, use it; otherwise infer from content + Identify the core question the document answers + State in one sentence: "This document exists to help [audience] accomplish [goal]" + Select the most appropriate structural model from structure-models based on purpose/audience + Note reader_type and which principles apply (human-reader-principles or llm-reader-principles) + + + Consult style_guide now and note its key requirements—these override default principles for this + analysis + Map the document structure: list each major section with its word count + Evaluate structure against the selected model's primary rules + (e.g., 'Does recommendation come first?' for Pyramid) + For each section, answer: Does this directly serve the stated purpose? + For each comprehension aid (visual, + summary, example, callout), answer: Does this help readers + understand or stay engaged? + Identify sections that could be: cut entirely, merged with + another, moved to a different location, or split + Identify true redundancies: identical information repeated + without purpose (not summaries or reinforcement) + Identify scope violations: content that belongs in a different document + Identify burying: critical information hidden deep in the document + + + Assess the reader's journey: Does the sequence match how readers will use this? + Identify premature detail: explanation given before the reader needs it + Identify missing scaffolding: complex ideas without adequate setup + Identify anti-patterns: FAQs that should be inline, appendices + that should be cut, overviews that repeat the body verbatim + Assess pacing: Is there enough + whitespace and visual variety to maintain attention? 
+ + + Compile all findings into prioritized recommendations + Categorize each recommendation: CUT (remove entirely), + MERGE (combine sections), MOVE (reorder), CONDENSE (shorten + significantly), QUESTION (needs author decision), PRESERVE + (explicitly keep-for elements that might seem cuttable but + serve comprehension) + For each recommendation, state the rationale in one sentence + Estimate impact: how many words would this save (or cost, for PRESERVE)? + If length_target was provided, assess whether recommendations meet it + Flag with warning: "This cut may impact + reader comprehension/engagement" + + + Output document summary (purpose, audience, reader_type, current length) + Output the recommendation list in priority order + Output estimated total reduction if all recommendations accepted + Output: "No substantive changes recommended-document structure is sound" + + ## Document Summary + - **Purpose:** [inferred or provided purpose] + - **Audience:** [inferred or provided audience] + - **Reader type:** [selected reader type] + - **Structure model:** [selected structure model] + - **Current length:** [X] words across [Y] sections + + ## Recommendations + + ### 1. [CUT/MERGE/MOVE/CONDENSE/QUESTION/PRESERVE] - [Section or element name] + **Rationale:** [One sentence explanation] + **Impact:** ~[X] words + **Comprehension note:** [If applicable, note impact on reader understanding] + + ### 2. ... 
+ + ## Summary + - **Total recommendations:** [N] + - **Estimated reduction:** [X] words ([Y]% of original) + - **Meets length target:** [Yes/No/No target specified] + - **Comprehension trade-offs:** [Note any cuts that sacrifice reader engagement for brevity] + + + + + HALT with error if content is empty or fewer than 3 words + HALT with error if reader_type is not "humans" or "llm" + If no structural issues found, output "No substantive changes + recommended" (this is valid completion, not an error) + + \ No newline at end of file diff --git a/_bmad/core/tasks/help.md b/_bmad/core/tasks/help.md new file mode 100644 index 0000000..c3c3fab --- /dev/null +++ b/_bmad/core/tasks/help.md @@ -0,0 +1,85 @@ +--- +name: help +description: Get unstuck by showing what workflow steps come next or answering questions about what to do +--- + +# Task: BMAD Help + +## ROUTING RULES + +- **Empty `phase` = anytime** — Universal tools work regardless of workflow state +- **Numbered phases indicate sequence** — Phases like `1-discover` → `2-define` → `3-build` → `4-ship` flow in order (naming varies by module) +- **Stay in module** — Guide through the active module's workflow based on phase+sequence ordering +- **Descriptions contain routing** — Read for alternate paths (e.g., "back to previous if fixes needed") +- **`required=true` blocks progress** — Required workflows must complete before proceeding to later phases +- **Artifacts reveal completion** — Search resolved output paths for `outputs` patterns, fuzzy-match found files to workflow rows + +## DISPLAY RULES + +### Command-Based Workflows +When `command` field has a value: +- Show the command prefixed with `/` (e.g., `/bmad-bmm-create-prd`) + +### Agent-Based Workflows +When `command` field is empty: +- User loads agent first via `/agent-command` +- Then invokes by referencing the `code` field or describing the `name` field +- Do NOT show a slash command — show the code value and agent load instruction instead + +Example 
presentation for empty command: +``` +Explain Concept (EC) +Load: /tech-writer, then ask to "EC about [topic]" +Agent: Tech Writer +Description: Create clear technical explanations with examples... +``` + +## MODULE DETECTION + +- **Empty `module` column** → universal tools (work across all modules) +- **Named `module`** → module-specific workflows + +Detect the active module from conversation context, recent workflows, or user query keywords. If ambiguous, ask the user. + +## INPUT ANALYSIS + +Determine what was just completed: +- Explicit completion stated by user +- Workflow completed in current conversation +- Artifacts found matching `outputs` patterns +- If `index.md` exists, read it for additional context +- If still unclear, ask: "What workflow did you most recently complete?" + +## EXECUTION + +1. **Load catalog** — Load `{project-root}/_bmad/_config/bmad-help.csv` + +2. **Resolve output locations and config** — Scan each folder under `_bmad/` (except `_config`) for `config.yaml`. For each workflow row, resolve its `output-location` variables against that module's config so artifact paths can be searched. Also extract `communication_language` and `project_knowledge` from each scanned module's config. + +3. **Ground in project knowledge** — If `project_knowledge` resolves to an existing path, read available documentation files (architecture docs, project overview, tech stack references) for grounding context. Use discovered project facts when composing any project-specific output. Never fabricate project-specific details — if documentation is unavailable, state so. + +4. **Detect active module** — Use MODULE DETECTION above + +5. **Analyze input** — Task may provide a workflow name/code, conversational phrase, or nothing. Infer what was just completed using INPUT ANALYSIS above. + +6. 
**Present recommendations** — Show next steps based on: + - Completed workflows detected + - Phase/sequence ordering (ROUTING RULES) + - Artifact presence + + **Optional items first** — List optional workflows until a required step is reached + **Required items next** — List the next required workflow + + For each item, apply DISPLAY RULES above and include: + - Workflow **name** + - **Command** OR **Code + Agent load instruction** (per DISPLAY RULES) + - **Agent** title and display name from the CSV (e.g., "🎨 Alex (Designer)") + - Brief **description** + +7. **Additional guidance to convey**: + - Present all output in `{communication_language}` + - Run each workflow in a **fresh context window** + - For **validation workflows**: recommend using a different high-quality LLM if available + - For conversational requests: match the user's tone while presenting clearly + +8. Return to the calling process after presenting recommendations. diff --git a/_bmad/core/tasks/index-docs.xml b/_bmad/core/tasks/index-docs.xml new file mode 100644 index 0000000..30e0609 --- /dev/null +++ b/_bmad/core/tasks/index-docs.xml @@ -0,0 +1,65 @@ + + + MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER + DO NOT skip steps or change the sequence + HALT immediately when halt-conditions are met + Each action xml tag within step xml tag is a REQUIRED action to complete that step + Sections outside flow (validation, output, critical-context) provide essential context - review and apply throughout execution + + + + + List all files and subdirectories in the target location + + + + Organize files by type, purpose, or subdirectory + + + + Read each file to understand its actual purpose and create brief (3-10 word) descriptions based on the content, not just the + filename + + + + Write or update index.md with organized file listings + + + + + + # Directory Index + + ## Files + + - **[filename.ext](./filename.ext)** - Brief description + - **[another-file.ext](./another-file.ext)** - 
Brief description + + ## Subdirectories + + ### subfolder/ + + - **[file1.ext](./subfolder/file1.ext)** - Brief description + - **[file2.ext](./subfolder/file2.ext)** - Brief description + + ### another-folder/ + + - **[file3.ext](./another-folder/file3.ext)** - Brief description + + + + + HALT if target directory does not exist or is inaccessible + HALT if user does not have write permissions to create index.md + + + + Use relative paths starting with ./ + Group similar files together + Read file contents to generate accurate descriptions - don't guess from filenames + Keep descriptions concise but informative (3-10 words) + Sort alphabetically within groups + Skip hidden files (starting with .) unless specified + + \ No newline at end of file diff --git a/_bmad/core/tasks/review-adversarial-general.xml b/_bmad/core/tasks/review-adversarial-general.xml new file mode 100644 index 0000000..421719b --- /dev/null +++ b/_bmad/core/tasks/review-adversarial-general.xml @@ -0,0 +1,48 @@ + + + + Cynically review content and produce findings + + + + + + + + MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER + DO NOT skip steps or change the sequence + HALT immediately when halt-conditions are met + Each action xml tag within step xml tag is a REQUIRED action to complete that step + + You are a cynical, jaded reviewer with zero patience for sloppy work + The content was submitted by a clueless weasel and you expect to find problems + Be skeptical of everything + Look for what's missing, not just what's wrong + Use a precise, professional tone - no profanity or personal attacks + + + + + Load the content to review from provided input or context + If content to review is empty, ask for clarification and abort task + Identify content type (diff, branch, uncommitted changes, document, etc.) 
+ + + + Review with extreme skepticism - assume problems exist + Find at least ten issues to fix or improve in the provided content + + + + Output findings as a Markdown list (descriptions only) + + + + + HALT if zero findings - this is suspicious, re-analyze or ask for guidance + HALT if content is empty or unreadable + + + \ No newline at end of file diff --git a/_bmad/core/tasks/shard-doc.xml b/_bmad/core/tasks/shard-doc.xml new file mode 100644 index 0000000..1dc8fe8 --- /dev/null +++ b/_bmad/core/tasks/shard-doc.xml @@ -0,0 +1,108 @@ + + Split large markdown documents into smaller, organized files based on level 2 sections using @kayvan/markdown-tree-parser tool + + + MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER + DO NOT skip steps or change the sequence + HALT immediately when halt-conditions are met + Each action xml tag within step xml tag is a REQUIRED action to complete that step + Sections outside flow (validation, output, critical-context) provide essential context - review and apply throughout execution + + + + Uses `npx @kayvan/markdown-tree-parser` to automatically shard documents by level 2 headings and generate an index + + + + + Ask user for the source document path if not provided already + Verify file exists and is accessible + Verify file is markdown format (.md extension) + HALT with error message + + + + Determine default destination: same location as source file, folder named after source file without .md extension + Example: /path/to/architecture.md → /path/to/architecture/ + Ask user for the destination folder path ([y] to confirm use of default: [suggested-path], else enter a new path) + Use the suggested destination path + Use the custom destination path + Verify destination folder exists or can be created + Check write permissions for destination + HALT with error message + + + + Inform user that sharding is beginning + Execute command: `npx @kayvan/markdown-tree-parser explode [source-document] [destination-folder]` 
+ Capture command output and any errors + HALT and display error to user + + + + Check that destination folder contains sharded files + Verify index.md was created in destination folder + Count the number of files created + HALT with error message + + + + Display completion report to user including: + - Source document path and name + - Destination folder path + - Number of section files created + - Confirmation that index.md was created + - Any tool output or warnings + Inform user that sharding completed successfully + + + + Keeping both the original and sharded versions defeats the purpose of sharding and can cause confusion + Present user with options for the original document: + + What would you like to do with the original document `[source-document-name]`? + + Options: + [d] Delete - Remove the original (recommended - shards can always be recombined) + [m] Move to archive - Move original to a backup/archive location + [k] Keep - Leave original in place (NOT recommended - defeats sharding purpose) + + Your choice (d/m/k): + + + Delete the original source document file + Confirm deletion to user: "✓ Original document deleted: [source-document-path]" + The document can be reconstructed from shards by concatenating all section files in order + + + + Determine default archive location: same directory as source, in an "archive" subfolder + Example: /path/to/architecture.md → /path/to/archive/architecture.md + Archive location ([y] to use default: [default-archive-path], or provide custom path): + Use default archive path + Use custom archive path + Create archive directory if it doesn't exist + Move original document to archive location + Confirm move to user: "✓ Original document moved to: [archive-path]" + + + + Display warning to user: + ⚠️ WARNING: Keeping both original and sharded versions is NOT recommended. 
+ + This creates confusion because: + - The discover_inputs protocol may load the wrong version + - Updates to one won't reflect in the other + - You'll have duplicate content taking up space + + Consider deleting or archiving the original document. + Confirm user choice: "Original document kept at: [source-document-path]" + + + + + + HALT if npx command fails or produces no output files + + \ No newline at end of file diff --git a/_bmad/core/tasks/workflow.xml b/_bmad/core/tasks/workflow.xml new file mode 100644 index 0000000..536c9d8 --- /dev/null +++ b/_bmad/core/tasks/workflow.xml @@ -0,0 +1,235 @@ + + Execute given workflow by loading its configuration, following instructions, and producing output + + + Always read COMPLETE files - NEVER use offset/limit when reading any workflow related files + Instructions are MANDATORY - either as file path, steps or embedded list in YAML, XML or markdown + Execute ALL steps in instructions IN EXACT ORDER + Save to template output file after EVERY "template-output" tag + NEVER skip a step - YOU are responsible for every steps execution without fail or excuse + + + + Steps execute in exact numerical order (1, 2, 3...) 
+ Optional steps: Ask user unless #yolo mode active + Template-output tags: Save content, discuss with the user the section completed, and NEVER proceed until the users indicates + to proceed (unless YOLO mode has been activated) + + + + + + Read workflow.yaml from provided path + Load config_source (REQUIRED for all modules) + Load external config from config_source path + Resolve all {config_source}: references with values from config + Resolve system variables (date:system-generated) and paths ({project-root}, {installed_path}) + Ask user for input of any variables that are still unknown + + + + Instructions: Read COMPLETE file from path OR embedded list (REQUIRED) + If template path → Read COMPLETE template file + If validation path → Note path for later loading when needed + If template: false → Mark as action-workflow (else template-workflow) + Data files (csv, json) → Store paths only, load on-demand when instructions reference them + + + + Resolve default_output_file path with all variables and {{date}} + Create output directory if doesn't exist + If template-workflow → Write template to output file with placeholders + If action-workflow → Skip file creation + + + + + For each step in instructions: + + + If optional="true" and NOT #yolo → Ask user to include + If if="condition" → Evaluate condition + If for-each="item" → Repeat step for each item + If repeat="n" → Repeat step n times + + + + Process step instructions (markdown or XML tags) + Replace {{variables}} with values (ask user if unknown) + + action xml tag → Perform the action + check if="condition" xml tag → Conditional block wrapping actions (requires closing </check>) + ask xml tag → Prompt user and WAIT for response + invoke-workflow xml tag → Execute another workflow with given inputs and the workflow.xml runner + invoke-task xml tag → Execute specified task + invoke-protocol name="protocol_name" xml tag → Execute reusable protocol from protocols section + goto step="x" → Jump to specified 
step + + + + + + Generate content for this section + Save to file (Write first time, Edit subsequent) + Display generated content + [a] Advanced Elicitation, [c] Continue, [p] Party-Mode, [y] YOLO the rest of this document only. WAIT for response. + Start the advanced elicitation workflow {project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml + + + Continue to next step + + + Start the party-mode workflow {project-root}/_bmad/core/workflows/party-mode/workflow.md + + + Enter #yolo mode for the rest of the workflow + + + + + + + If no special tags and NOT #yolo: + Continue to next step? (y/n/edit) + + + + + Confirm document saved to output path + Report workflow completion + + + + + Full user interaction and confirmation of EVERY step at EVERY template output - NO EXCEPTIONS except yolo MODE + Skip all confirmations and elicitation, minimize prompts and try to produce all of the workflow automatically by + simulating the remaining discussions with a simulated expert user + + + + + step n="X" goal="..." 
- Define step with number and goal + optional="true" - Step can be skipped + if="condition" - Conditional execution + for-each="collection" - Iterate over items + repeat="n" - Repeat n times + + + action - Required action to perform + action if="condition" - Single conditional action (inline, no closing tag needed) + check if="condition">...</check> - Conditional block wrapping multiple items (closing tag required) + ask - Get user input (ALWAYS wait for response before continuing) + goto - Jump to another step + invoke-workflow - Call another workflow + invoke-task - Call a task + invoke-protocol - Execute a reusable protocol (e.g., discover_inputs) + + + template-output - Save content checkpoint + critical - Cannot be skipped + example - Show example output + + + + + + Intelligently load project files (whole or sharded) based on workflow's input_file_patterns configuration + + Only execute if workflow.yaml contains input_file_patterns section + + + + Read input_file_patterns from loaded workflow.yaml + For each pattern group (prd, architecture, epics, etc.), note the load_strategy if present + + + + For each pattern in input_file_patterns: + + + + Determine load_strategy from pattern config (defaults to FULL_LOAD if not specified) + + + Load ALL files in sharded directory - used for PRD, Architecture, UX, brownfield docs + Use glob pattern to find ALL .md files (e.g., "{output_folder}/*architecture*/*.md") + Load EVERY matching file completely + Concatenate content in logical order (index.md first if exists, then alphabetical) + Store in variable: {pattern_name_content} + + + + Load specific shard using template variable - example: used for epics with {{epic_num}} + Check for template variables in sharded_single pattern (e.g., {{epic_num}}) + If variable undefined, ask user for value OR infer from context + Resolve template to specific file path + Load that specific file + Store in variable: {pattern_name_content} + + + + Load index.md, analyze structure and 
description of each doc in the index, then intelligently load relevant docs + DO NOT BE LAZY - use best judgment to load documents that might have relevant information, even if only a 5% chance + Load index.md from sharded directory + Parse table of contents, links, section headers + Analyze workflow's purpose and objective + Identify which linked/referenced documents are likely relevant + If workflow is about authentication and index shows "Auth Overview", "Payment Setup", "Deployment" → Load auth + docs, consider deployment docs, skip payment + Load all identified relevant documents + Store combined content in variable: {pattern_name_content} + When in doubt, LOAD IT - context is valuable, being thorough is better than missing critical info + + Mark pattern as RESOLVED, skip to next pattern + + + + + + Attempt glob match on 'whole' pattern (e.g., "{output_folder}/*prd*.md") + + Load ALL matching files completely (no offset/limit) + Store content in variable: {pattern_name_content} (e.g., {prd_content}) + Mark pattern as RESOLVED, skip to next pattern + + + + + + + Set {pattern_name_content} to empty string + Note in session: "No {pattern_name} files found" (not an error, just unavailable, offer the user a chance to provide) + + + + + + List all loaded content variables with file counts + + ✓ Loaded {prd_content} from 5 sharded files: prd/index.md, prd/requirements.md, ... 
+ ✓ Loaded {architecture_content} from 1 file: Architecture.md + ✓ Loaded {epics_content} from selective load: epics/epic-3.md + ○ No ux_design files found + + This gives workflow transparency into what context is available + + + + + + + + + • This is the complete workflow execution engine + • You MUST Follow instructions exactly as written + • The workflow execution engine is governed by: {project-root}/_bmad/core/tasks/workflow.xml + • You MUST have already loaded and processed: {installed_path}/workflow.yaml + • This workflow uses INTENT-DRIVEN PLANNING - adapt organically to product type and context + • YOU ARE FACILITATING A CONVERSATION With a user to produce a final document step by step. The whole process is meant to be + collaborative helping the user flesh out their ideas. Do not rush or optimize and skip any section. + + + \ No newline at end of file diff --git a/_bmad/core/workflows/advanced-elicitation/methods.csv b/_bmad/core/workflows/advanced-elicitation/methods.csv new file mode 100644 index 0000000..fa563f5 --- /dev/null +++ b/_bmad/core/workflows/advanced-elicitation/methods.csv @@ -0,0 +1,51 @@ +num,category,method_name,description,output_pattern +1,collaboration,Stakeholder Round Table,Convene multiple personas to contribute diverse perspectives - essential for requirements gathering and finding balanced solutions across competing interests,perspectives → synthesis → alignment +2,collaboration,Expert Panel Review,Assemble domain experts for deep specialized analysis - ideal when technical depth and peer review quality are needed,expert views → consensus → recommendations +3,collaboration,Debate Club Showdown,Two personas argue opposing positions while a moderator scores points - great for exploring controversial decisions and finding middle ground,thesis → antithesis → synthesis +4,collaboration,User Persona Focus Group,Gather your product's user personas to react to proposals and share frustrations - essential for validating features and 
discovering unmet needs,reactions → concerns → priorities +5,collaboration,Time Traveler Council,Past-you and future-you advise present-you on decisions - powerful for gaining perspective on long-term consequences vs short-term pressures,past wisdom → present choice → future impact +6,collaboration,Cross-Functional War Room,Product manager + engineer + designer tackle a problem together - reveals trade-offs between feasibility desirability and viability,constraints → trade-offs → balanced solution +7,collaboration,Mentor and Apprentice,Senior expert teaches junior while junior asks naive questions - surfaces hidden assumptions through teaching,explanation → questions → deeper understanding +8,collaboration,Good Cop Bad Cop,Supportive persona and critical persona alternate - finds both strengths to build on and weaknesses to address,encouragement → criticism → balanced view +9,collaboration,Improv Yes-And,Multiple personas build on each other's ideas without blocking - generates unexpected creative directions through collaborative building,idea → build → build → surprising result +10,collaboration,Customer Support Theater,Angry customer and support rep roleplay to find pain points - reveals real user frustrations and service gaps,complaint → investigation → resolution → prevention +11,advanced,Tree of Thoughts,Explore multiple reasoning paths simultaneously then evaluate and select the best - perfect for complex problems with multiple valid approaches,paths → evaluation → selection +12,advanced,Graph of Thoughts,Model reasoning as an interconnected network of ideas to reveal hidden relationships - ideal for systems thinking and discovering emergent patterns,nodes → connections → patterns +13,advanced,Thread of Thought,Maintain coherent reasoning across long contexts by weaving a continuous narrative thread - essential for RAG systems and maintaining consistency,context → thread → synthesis +14,advanced,Self-Consistency Validation,Generate multiple independent 
approaches then compare for consistency - crucial for high-stakes decisions where verification matters,approaches → comparison → consensus +15,advanced,Meta-Prompting Analysis,Step back to analyze the approach structure and methodology itself - valuable for optimizing prompts and improving problem-solving,current → analysis → optimization +16,advanced,Reasoning via Planning,Build a reasoning tree guided by world models and goal states - excellent for strategic planning and sequential decision-making,model → planning → strategy +17,competitive,Red Team vs Blue Team,Adversarial attack-defend analysis to find vulnerabilities - critical for security testing and building robust solutions,defense → attack → hardening +18,competitive,Shark Tank Pitch,Entrepreneur pitches to skeptical investors who poke holes - stress-tests business viability and forces clarity on value proposition,pitch → challenges → refinement +19,competitive,Code Review Gauntlet,Senior devs with different philosophies review the same code - surfaces style debates and finds consensus on best practices,reviews → debates → standards +20,technical,Architecture Decision Records,Multiple architect personas propose and debate architectural choices with explicit trade-offs - ensures decisions are well-reasoned and documented,options → trade-offs → decision → rationale +21,technical,Rubber Duck Debugging Evolved,Explain your code to progressively more technical ducks until you find the bug - forces clarity at multiple abstraction levels,simple → detailed → technical → aha +22,technical,Algorithm Olympics,Multiple approaches compete on the same problem with benchmarks - finds optimal solution through direct comparison,implementations → benchmarks → winner +23,technical,Security Audit Personas,Hacker + defender + auditor examine system from different threat models - comprehensive security review from multiple angles,vulnerabilities → defenses → compliance +24,technical,Performance Profiler Panel,Database expert + 
frontend specialist + DevOps engineer diagnose slowness - finds bottlenecks across the full stack,symptoms → analysis → optimizations +25,creative,SCAMPER Method,Apply seven creativity lenses (Substitute/Combine/Adapt/Modify/Put/Eliminate/Reverse) - systematic ideation for product innovation,S→C→A→M→P→E→R +26,creative,Reverse Engineering,Work backwards from desired outcome to find implementation path - powerful for goal achievement and understanding endpoints,end state → steps backward → path forward +27,creative,What If Scenarios,Explore alternative realities to understand possibilities and implications - valuable for contingency planning and exploration,scenarios → implications → insights +28,creative,Random Input Stimulus,Inject unrelated concepts to spark unexpected connections - breaks creative blocks through forced lateral thinking,random word → associations → novel ideas +29,creative,Exquisite Corpse Brainstorm,Each persona adds to the idea seeing only the previous contribution - generates surprising combinations through constrained collaboration,contribution → handoff → contribution → surprise +30,creative,Genre Mashup,Combine two unrelated domains to find fresh approaches - innovation through unexpected cross-pollination,domain A + domain B → hybrid insights +31,research,Literature Review Personas,Optimist researcher + skeptic researcher + synthesizer review sources - balanced assessment of evidence quality,sources → critiques → synthesis +32,research,Thesis Defense Simulation,Student defends hypothesis against committee with different concerns - stress-tests research methodology and conclusions,thesis → challenges → defense → refinements +33,research,Comparative Analysis Matrix,Multiple analysts evaluate options against weighted criteria - structured decision-making with explicit scoring,options → criteria → scores → recommendation +34,risk,Pre-mortem Analysis,Imagine future failure then work backwards to prevent it - powerful technique for risk 
mitigation before major launches,failure scenario → causes → prevention +35,risk,Failure Mode Analysis,Systematically explore how each component could fail - critical for reliability engineering and safety-critical systems,components → failures → prevention +36,risk,Challenge from Critical Perspective,Play devil's advocate to stress-test ideas and find weaknesses - essential for overcoming groupthink,assumptions → challenges → strengthening +37,risk,Identify Potential Risks,Brainstorm what could go wrong across all categories - fundamental for project planning and deployment preparation,categories → risks → mitigations +38,risk,Chaos Monkey Scenarios,Deliberately break things to test resilience and recovery - ensures systems handle failures gracefully,break → observe → harden +39,core,First Principles Analysis,Strip away assumptions to rebuild from fundamental truths - breakthrough technique for innovation and solving impossible problems,assumptions → truths → new approach +40,core,5 Whys Deep Dive,Repeatedly ask why to drill down to root causes - simple but powerful for understanding failures,why chain → root cause → solution +41,core,Socratic Questioning,Use targeted questions to reveal hidden assumptions and guide discovery - excellent for teaching and self-discovery,questions → revelations → understanding +42,core,Critique and Refine,Systematic review to identify strengths and weaknesses then improve - standard quality check for drafts,strengths/weaknesses → improvements → refined +43,core,Explain Reasoning,Walk through step-by-step thinking to show how conclusions were reached - crucial for transparency,steps → logic → conclusion +44,core,Expand or Contract for Audience,Dynamically adjust detail level and technical depth for target audience - matches content to reader capabilities,audience → adjustments → refined content +45,learning,Feynman Technique,Explain complex concepts simply as if teaching a child - the ultimate test of true understanding,complex → 
simple → gaps → mastery +46,learning,Active Recall Testing,Test understanding without references to verify true knowledge - essential for identifying gaps,test → gaps → reinforcement +47,philosophical,Occam's Razor Application,Find the simplest sufficient explanation by eliminating unnecessary complexity - essential for debugging,options → simplification → selection +48,philosophical,Trolley Problem Variations,Explore ethical trade-offs through moral dilemmas - valuable for understanding values and difficult decisions,dilemma → analysis → decision +49,retrospective,Hindsight Reflection,Imagine looking back from the future to gain perspective - powerful for project reviews,future view → insights → application +50,retrospective,Lessons Learned Extraction,Systematically identify key takeaways and actionable improvements - essential for continuous improvement,experience → lessons → actions diff --git a/_bmad/core/workflows/advanced-elicitation/workflow.xml b/_bmad/core/workflows/advanced-elicitation/workflow.xml new file mode 100644 index 0000000..ea7395e --- /dev/null +++ b/_bmad/core/workflows/advanced-elicitation/workflow.xml @@ -0,0 +1,117 @@ + + + MANDATORY: Execute ALL steps in the flow section IN EXACT ORDER + DO NOT skip steps or change the sequence + HALT immediately when halt-conditions are met + Each action xml tag within step xml tag is a REQUIRED action to complete that step + Sections outside flow (validation, output, critical-context) provide essential context - review and apply throughout execution + YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + + + + When called during template workflow processing: + 1. Receive or review the current section content that was just generated or + 2. Apply elicitation methods iteratively to enhance that specific content + 3. Return the enhanced version back when user selects 'x' to proceed and return back + 4. 
The enhanced content replaces the original section content in the output document + + + + + Load and read {{methods}} and {{agent-party}} + + + category: Method grouping (core, structural, risk, etc.) + method_name: Display name for the method + description: Rich explanation of what the method does, when to use it, and why it's valuable + output_pattern: Flexible flow guide using → arrows (e.g., "analysis → insights → action") + + + + Use conversation history + Analyze: content type, complexity, stakeholder needs, risk level, and creative potential + + + + 1. Analyze context: Content type, complexity, stakeholder needs, risk level, creative potential + 2. Parse descriptions: Understand each method's purpose from the rich descriptions in CSV + 3. Select 5 methods: Choose methods that best match the context based on their descriptions + 4. Balance approach: Include mix of foundational and specialized techniques as appropriate + + + + + + + **Advanced Elicitation Options (If you launched Party Mode, they will participate randomly)** + Choose a number (1-5), [r] to Reshuffle, [a] List All, or [x] to Proceed: + + 1. [Method Name] + 2. [Method Name] + 3. [Method Name] + 4. [Method Name] + 5. [Method Name] + r. Reshuffle the list with 5 new options + a. List all methods with descriptions + x. Proceed / No Further Actions + + + + + Execute the selected method using its description from the CSV + Adapt the method's complexity and output format based on the current context + Apply the method creatively to the current section content being enhanced + Display the enhanced version showing what the method revealed or improved + CRITICAL: Ask the user if they would like to apply the changes to the doc (y/n/other) and HALT to await response. + CRITICAL: ONLY if Yes, apply the changes. IF No, discard your memory of the proposed changes. If any other reply, try best to + follow the instructions given by the user. 
+ CRITICAL: Re-present the same 1-5,r,a,x prompt to allow additional elicitations + + + Select 5 random methods from advanced-elicitation-methods.csv, present new list with same prompt format + When selecting, try to think and pick a diverse set of methods covering different categories and approaches, with 1 and 2 being + potentially the most useful for the document or section being discovered + + + Complete elicitation and proceed + Return the fully enhanced content back to create-doc.md + The enhanced content becomes the final version for that section + Signal completion back to create-doc.md to continue with next section + + + List all methods with their descriptions from the CSV in a compact table + Allow user to select any method by name or number from the full list + After selection, execute the method as described in the n="1-5" case above + + + Apply changes to current section content and re-present choices + + + Execute methods in sequence on the content, then re-offer choices + + + + + + Method execution: Use the description from CSV to understand and apply each method + Output pattern: Use the pattern as a flexible guide (e.g., "paths → evaluation → selection") + Dynamic adaptation: Adjust complexity based on content needs (simple to sophisticated) + Creative application: Interpret methods flexibly based on context while maintaining pattern consistency + Focus on actionable insights + Stay relevant: Tie elicitation to specific content being analyzed (the current section from the document being created unless user + indicates otherwise) + Identify personas: For single or multi-persona methods, clearly identify viewpoints, and use party members if available in memory + already + Critical loop behavior: Always re-offer the 1-5,r,a,x choices after each method execution + Continue until user selects 'x' to proceed with enhanced content, confirm or ask the user what should be accepted from the session + Each method application builds upon previous enhancements 
Content preservation: Track all enhancements made during elicitation + Iterative enhancement: Each selected method (1-5) should: + 1. Apply to the current enhanced version of the content + 2. Show the improvements made + 3. Return to the prompt for additional elicitations or completion + + + \ No newline at end of file diff --git a/_bmad/core/workflows/brainstorming/brain-methods.csv b/_bmad/core/workflows/brainstorming/brain-methods.csv new file mode 100644 index 0000000..29c7787 --- /dev/null +++ b/_bmad/core/workflows/brainstorming/brain-methods.csv @@ -0,0 +1,62 @@ +category,technique_name,description +collaborative,Yes And Building,"Build momentum through positive additions where each idea becomes a launching pad - use prompts like 'Yes and we could also...' or 'Building on that idea...' to create energetic collaborative flow that builds upon previous contributions" +collaborative,Brain Writing Round Robin,"Silent idea generation followed by building on others' written concepts - gives quieter voices equal contribution while maintaining documentation through the sequence of writing silently, passing ideas, and building on received concepts" +collaborative,Random Stimulation,"Use random words/images as creative catalysts to force unexpected connections - breaks through mental blocks with serendipitous inspiration by asking how random elements relate, what connections exist, and forcing relationships" +collaborative,Role Playing,"Generate solutions from multiple stakeholder perspectives to build empathy while ensuring comprehensive consideration - embody different roles by asking what they want, how they'd approach problems, and what matters most to them" +collaborative,Ideation Relay Race,"Rapid-fire idea building under time pressure creates urgency and breakthroughs - structure with 30-second additions, quick building on ideas, and fast passing to maintain creative momentum and prevent overthinking" +creative,What If Scenarios,"Explore radical possibilities by 
questioning all constraints and assumptions - perfect for breaking through stuck thinking using prompts like 'What if we had unlimited resources?' 'What if the opposite were true?' or 'What if this problem didn't exist?'" +creative,Analogical Thinking,"Find creative solutions by drawing parallels to other domains - transfer successful patterns by asking 'This is like what?' 'How is this similar to...' and 'What other examples come to mind?' to connect to existing solutions" +creative,Reversal Inversion,"Deliberately flip problems upside down to reveal hidden assumptions and fresh angles - great when conventional approaches fail by asking 'What if we did the opposite?' 'How could we make this worse?' and 'What's the reverse approach?'" +creative,First Principles Thinking,"Strip away assumptions to rebuild from fundamental truths - essential for breakthrough innovation by asking 'What do we know for certain?' 'What are the fundamental truths?' and 'If we started from scratch?'" +creative,Forced Relationships,"Connect unrelated concepts to spark innovative bridges through creative collision - take two unrelated things, find connections between them, identify bridges, and explore how they could work together to generate unexpected solutions" +creative,Time Shifting,"Explore solutions across different time periods to reveal constraints and opportunities by asking 'How would this work in the past?' 'What about 100 years from now?' 'Different era constraints?' 
and 'What time-based solutions apply?'" +creative,Metaphor Mapping,"Use extended metaphors as thinking tools to explore problems from new angles - transforms abstract challenges into tangible narratives by asking 'This problem is like a metaphor,' extending the metaphor, and mapping elements to discover insights" +creative,Cross-Pollination,"Transfer solutions from completely different industries or domains to spark breakthrough innovations by asking how industry X would solve this, what patterns work in field Y, and how to adapt solutions from domain Z" +creative,Concept Blending,"Merge two or more existing concepts to create entirely new categories - goes beyond simple combination to genuine innovation by asking what emerges when concepts merge, what new category is created, and how the blend transcends original ideas" +creative,Reverse Brainstorming,"Generate problems instead of solutions to identify hidden opportunities and unexpected pathways by asking 'What could go wrong?' 'How could we make this fail?' and 'What problems could we create?' to reveal solution insights" +creative,Sensory Exploration,"Engage all five senses to discover multi-dimensional solution spaces beyond purely analytical thinking by asking what ideas feel, smell, taste, or sound like, and how different senses engage with the problem space" +deep,Five Whys,"Drill down through layers of causation to uncover root causes - essential for solving problems at source rather than symptoms by asking 'Why did this happen?' 
repeatedly until reaching fundamental drivers and ultimate causes" +deep,Morphological Analysis,"Systematically explore all possible parameter combinations for complex systems requiring comprehensive solution mapping - identify key parameters, list options for each, try different combinations, and identify emerging patterns" +deep,Provocation Technique,"Use deliberately provocative statements to extract useful ideas from seemingly absurd starting points - catalyzes breakthrough thinking by asking 'What if provocative statement?' 'How could this be useful?' 'What idea triggers?' and 'Extract the principle'" +deep,Assumption Reversal,"Challenge and flip core assumptions to rebuild from new foundations - essential for paradigm shifts by asking 'What assumptions are we making?' 'What if the opposite were true?' 'Challenge each assumption' and 'Rebuild from new assumptions'" +deep,Question Storming,"Generate questions before seeking answers to properly define problem space - ensures solving the right problem by asking only questions, no answers yet, focusing on what we don't know, and identifying what we should be asking" +deep,Constraint Mapping,"Identify and visualize all constraints to find promising pathways around or through limitations - ask what all constraints exist, which are real vs imagined, and how to work around or eliminate barriers to solution space" +deep,Failure Analysis,"Study successful failures to extract valuable insights and avoid common pitfalls - learns from what didn't work by asking what went wrong, why it failed, what lessons emerged, and how to apply failure wisdom to current challenges" +deep,Emergent Thinking,"Allow solutions to emerge organically without forcing linear progression - embraces complexity and natural development by asking what patterns emerge, what wants to happen naturally, and what's trying to emerge from the system" +introspective_delight,Inner Child Conference,"Channel pure childhood curiosity and wonder to rekindle 
playful exploration - ask what 7-year-old you would ask, use 'why why why' questioning, make it fun again, and forbid boring thinking to access innocent questioning that cuts through adult complications" +introspective_delight,Shadow Work Mining,"Explore what you're actively avoiding or resisting to uncover hidden insights - examine unconscious blocks and resistance patterns by asking what you're avoiding, where's resistance, what scares you, and mining the shadows for buried wisdom" +introspective_delight,Values Archaeology,"Excavate deep personal values driving decisions to clarify authentic priorities - dig to bedrock motivations by asking what really matters, why you care, what's non-negotiable, and what core values guide your choices" +introspective_delight,Future Self Interview,"Seek wisdom from wiser future self for long-term perspective - gain temporal self-mentoring by asking your 80-year-old self what they'd tell younger you, how future wisdom speaks, and what long-term perspective reveals" +introspective_delight,Body Wisdom Dialogue,"Let physical sensations and gut feelings guide ideation - tap somatic intelligence often ignored by mental approaches by asking what your body says, where you feel it, trusting tension, and following physical cues for embodied wisdom" +introspective_delight,Permission Giving,"Grant explicit permission to think impossible thoughts and break self-imposed creative barriers - give yourself permission to explore, try, experiment, and break free from limitations that constrain authentic creative expression" +structured,SCAMPER Method,"Systematic creativity through seven lenses for methodical product improvement and innovation - Substitute (what could you substitute), Combine (what could you combine), Adapt (how could you adapt), Modify (what could you modify), Put to other uses, Eliminate, Reverse" +structured,Six Thinking Hats,"Explore problems through six distinct perspectives without conflict - White Hat (facts), Red Hat 
(emotions), Yellow Hat (benefits), Black Hat (risks), Green Hat (creativity), Blue Hat (process) to ensure comprehensive analysis from all angles" +structured,Mind Mapping,"Visually branch ideas from central concept to discover connections and expand thinking - perfect for organizing complex thoughts and seeing big picture by putting main idea in center, branching concepts, and identifying sub-branches" +structured,Resource Constraints,"Generate innovative solutions by imposing extreme limitations - forces essential priorities and creative efficiency under pressure by asking what if you had only $1, no technology, one hour to solve, or minimal resources only" +structured,Decision Tree Mapping,"Map out all possible decision paths and outcomes to reveal hidden opportunities and risks - visualizes complex choice architectures by identifying possible paths, decision points, and where different choices lead" +structured,Solution Matrix,"Create systematic grid of problem variables and solution approaches to find optimal combinations and discover gaps - identify key variables, solution approaches, test combinations, and identify most effective pairings" +structured,Trait Transfer,"Borrow attributes from successful solutions in unrelated domains to enhance approach - systematically adapts winning characteristics by asking what traits make success X work, how to transfer these traits, and what they'd look like here" +theatrical,Time Travel Talk Show,"Interview past/present/future selves for temporal wisdom - playful method for gaining perspective across different life stages by interviewing past self, asking what future you'd say, and exploring different timeline perspectives" +theatrical,Alien Anthropologist,"Examine familiar problems through completely foreign eyes - reveals hidden assumptions by adopting outsider's bewildered perspective by becoming alien observer, asking what seems strange, and getting outside perspective insights" +theatrical,Dream Fusion 
Laboratory,"Start with impossible fantasy solutions then reverse-engineer practical steps - makes ambitious thinking actionable through backwards design by dreaming impossible solutions, working backwards to reality, and identifying bridging steps" +theatrical,Emotion Orchestra,"Let different emotions lead separate brainstorming sessions then harmonize - uses emotional intelligence for comprehensive perspective by exploring angry perspectives, joyful approaches, fearful considerations, hopeful solutions, then harmonizing all voices" +theatrical,Parallel Universe Cafe,"Explore solutions under alternative reality rules - breaks conventional thinking by changing fundamental assumptions about how things work by exploring different physics universes, alternative social norms, changed historical events, and reality rule variations" +theatrical,Persona Journey,"Embody different archetypes or personas to access diverse wisdom through character exploration - become the archetype, ask how persona would solve this, and explore what character sees that normal thinking misses" +wild,Chaos Engineering,"Deliberately break things to discover robust solutions - builds anti-fragility by stress-testing ideas against worst-case scenarios by asking what if everything went wrong, breaking on purpose, how it fails gracefully, and building from rubble" +wild,Guerrilla Gardening Ideas,"Plant unexpected solutions in unlikely places - uses surprise and unconventional placement for stealth innovation by asking where's the least expected place, planting ideas secretly, growing solutions underground, and implementing with surprise" +wild,Pirate Code Brainstorm,"Take what works from anywhere and remix without permission - encourages rule-bending rapid prototyping and maverick thinking by asking what pirates would steal, remixing without asking, taking best and running, and needing no permission" +wild,Zombie Apocalypse Planning,"Design solutions for extreme survival scenarios - strips away all 
but essential functions to find core value by asking what happens when society collapses, what basics work, building from nothing, and thinking in survival mode" +wild,Drunk History Retelling,"Explain complex ideas with uninhibited simplicity - removes overthinking barriers to find raw truth through simplified expression by explaining like you're tipsy, using no filter, sharing raw thoughts, and simplifying to absurdity" +wild,Anti-Solution,"Generate ways to make the problem worse or more interesting - reveals hidden assumptions through destructive creativity by asking how to sabotage this, what would make it fail spectacularly, and how to create more problems to find solution insights" +wild,Quantum Superposition,"Hold multiple contradictory solutions simultaneously until best emerges through observation and testing - explores how all solutions could be true simultaneously, how contradictions coexist, and what happens when outcomes are observed" +wild,Elemental Forces,"Imagine solutions being sculpted by natural elements to tap into primal creative energies - explore how earth would sculpt this, what fire would forge, how water flows through this, and what air reveals to access elemental wisdom" +biomimetic,Nature's Solutions,"Study how nature solves similar problems and adapt biological strategies to challenge - ask how nature would solve this, what ecosystems provide parallels, and what biological strategies apply to access 3.8 billion years of evolutionary wisdom" +biomimetic,Ecosystem Thinking,"Analyze problem as ecosystem to identify symbiotic relationships, natural succession, and ecological principles - explore symbiotic relationships, natural succession application, and ecological principles for systems thinking" +biomimetic,Evolutionary Pressure,"Apply evolutionary principles to gradually improve solutions through selective pressure and adaptation - ask how evolution would optimize this, what selective pressures apply, and how this adapts over time to 
harness natural selection wisdom" +quantum,Observer Effect,"Recognize how observing and measuring solutions changes their behavior - uses quantum principles for innovation by asking how observing changes this, what measurement effects matter, and how to use observer effect advantageously" +quantum,Entanglement Thinking,"Explore how different solution elements might be connected regardless of distance - reveals hidden relationships by asking what elements are entangled, how distant parts affect each other, and what hidden connections exist between solution components" +quantum,Superposition Collapse,"Hold multiple potential solutions simultaneously until constraints force single optimal outcome - leverages quantum decision theory by asking what if all options were possible, what constraints force collapse, and which solution emerges when observed" +cultural,Indigenous Wisdom,"Draw upon traditional knowledge systems and indigenous approaches overlooked by modern thinking - ask how specific cultures would approach this, what traditional knowledge applies, and what ancestral wisdom guides us to access overlooked problem-solving methods" +cultural,Fusion Cuisine,"Mix cultural approaches and perspectives like fusion cuisine - creates innovation through cultural cross-pollination by asking what happens when mixing culture A with culture B, what cultural hybrids emerge, and what fusion creates" +cultural,Ritual Innovation,"Apply ritual design principles to create transformative experiences and solutions - uses anthropological insights for human-centered design by asking what ritual would transform this, how to make it ceremonial, and what transformation this needs" +cultural,Mythic Frameworks,"Use myths and archetypal stories as frameworks for understanding and solving problems - taps into collective unconscious by asking what myth parallels this, what archetypes are involved, and how mythic structure informs solution" \ No newline at end of file diff --git 
a/_bmad/core/workflows/brainstorming/steps/step-01-session-setup.md b/_bmad/core/workflows/brainstorming/steps/step-01-session-setup.md new file mode 100644 index 0000000..7e1cb2c --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-01-session-setup.md @@ -0,0 +1,197 @@ +# Step 1: Session Setup and Continuation Detection + +## MANDATORY EXECUTION RULES (READ FIRST): + +- 🛑 NEVER generate content without user input +- ✅ ALWAYS treat this as collaborative facilitation +- 📋 YOU ARE A FACILITATOR, not a content generator +- 💬 FOCUS on session setup and continuation detection only +- 🚪 DETECT existing workflow state and handle continuation properly +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Show your analysis before taking any action +- 💾 Initialize document and update frontmatter +- 📖 Set up frontmatter `stepsCompleted: [1]` before loading next step +- 🚫 FORBIDDEN to load next step until setup is complete + +## CONTEXT BOUNDARIES: + +- Variables from workflow.md are available in memory +- Previous context = what's in output document + frontmatter +- Don't assume knowledge from other steps +- Brain techniques loaded on-demand from CSV when needed + +## YOUR TASK: + +Initialize the brainstorming workflow by detecting continuation state and setting up session context. + +## INITIALIZATION SEQUENCE: + +### 1. Check for Existing Workflow + +First, check if the output document already exists: + +- Look for file at `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` +- If exists, read the complete file including frontmatter +- If not exists, this is a fresh workflow + +### 2. Handle Continuation (If Document Exists) + +If the document exists and has frontmatter with `stepsCompleted`: + +- **STOP here** and load `./step-01b-continue.md` immediately +- Do not proceed with any initialization tasks +- Let step-01b handle the continuation logic + +### 3. 
Fresh Workflow Setup (If No Document) + +If no document exists or no `stepsCompleted` in frontmatter: + +#### A. Initialize Document + +Create the brainstorming session document: + +```bash +# Create directory if needed +mkdir -p "$(dirname "{output_folder}/brainstorming/brainstorming-session-{{date}}.md")" + +# Initialize from template +cp "{template_path}" "{output_folder}/brainstorming/brainstorming-session-{{date}}.md" +``` + +#### B. Context File Check and Loading + +**Check for Context File:** + +- Check if `context_file` is provided in workflow invocation +- If context file exists and is readable, load it +- Parse context content for project-specific guidance +- Use context to inform session setup and approach recommendations + +#### C. Session Context Gathering + +"Welcome {{user_name}}! I'm excited to facilitate your brainstorming session. I'll guide you through proven creativity techniques to generate innovative ideas and breakthrough solutions. + +**Context Loading:** [If context_file provided, indicate context is loaded] +**Context-Based Guidance:** [If context available, briefly mention focus areas] + +**Let's set up your session for maximum creativity and productivity:** + +**Session Discovery Questions:** + +1. **What are we brainstorming about?** (The central topic or challenge) +2. **What specific outcomes are you hoping for?** (Types of ideas, solutions, or insights)" + +#### D. Process User Responses + +Wait for user responses, then: + +**Session Analysis:** +"Based on your responses, I understand we're focusing on **[summarized topic]** with goals around **[summarized objectives]**. + +**Session Parameters:** + +- **Topic Focus:** [Clear topic articulation] +- **Primary Goals:** [Specific outcome objectives] + +**Does this accurately capture what you want to achieve?**" + +#### E. 
Update Frontmatter and Document + +Update the document frontmatter: + +```yaml +--- +stepsCompleted: [1] +inputDocuments: [] +session_topic: '[session_topic]' +session_goals: '[session_goals]' +selected_approach: '' +techniques_used: [] +ideas_generated: [] +context_file: '[context_file if provided]' +--- +``` + +Append to document: + +```markdown +## Session Overview + +**Topic:** [session_topic] +**Goals:** [session_goals] + +### Context Guidance + +_[If context file provided, summarize key context and focus areas]_ + +### Session Setup + +_[Content based on conversation about session parameters and facilitator approach]_ +``` + +## APPEND TO DOCUMENT: + +When user selects approach, append the session overview content directly to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` using the structure from above. + +### E. Continue to Technique Selection + +"**Session setup complete!** I have a clear understanding of your goals and can select the perfect techniques for your brainstorming needs. + +**Ready to explore technique approaches?** +[1] User-Selected Techniques - Browse our complete technique library +[2] AI-Recommended Techniques - Get customized suggestions based on your goals +[3] Random Technique Selection - Discover unexpected creative methods +[4] Progressive Technique Flow - Start broad, then systematically narrow focus + +Which approach appeals to you most? (Enter 1-4)" + +### 4. Handle User Selection and Initial Document Append + +#### When user selects approach number: + +- **Append initial session overview to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`** +- **Update frontmatter:** `stepsCompleted: [1]`, `selected_approach: '[selected approach]'` +- **Load the appropriate step-02 file** based on selection + +### 5. 
Handle User Selection + +After user selects approach number: + +- **If 1:** Load `./step-02a-user-selected.md` +- **If 2:** Load `./step-02b-ai-recommended.md` +- **If 3:** Load `./step-02c-random-selection.md` +- **If 4:** Load `./step-02d-progressive-flow.md` + +## SUCCESS METRICS: + +✅ Existing workflow detected and continuation handled properly +✅ Fresh workflow initialized with correct document structure +✅ Session context gathered and understood clearly +✅ User's approach selection captured and routed correctly +✅ Frontmatter properly updated with session state +✅ Document initialized with session overview section + +## FAILURE MODES: + +❌ Not checking for existing document before creating new one +❌ Missing continuation detection leading to duplicate work +❌ Insufficient session context gathering +❌ Not properly routing user's approach selection +❌ Frontmatter not updated with session parameters + +## SESSION SETUP PROTOCOLS: + +- Always verify document existence before initialization +- Load brain techniques CSV only when needed for technique presentation +- Use collaborative facilitation language throughout +- Maintain psychological safety for creative exploration +- Clear next-step routing based on user preferences + +## NEXT STEPS: + +Based on user's approach selection, load the appropriate step-02 file for technique selection and facilitation. + +Remember: Focus only on setup and routing - don't preload technique information or look ahead to execution steps! 
diff --git a/_bmad/core/workflows/brainstorming/steps/step-01b-continue.md b/_bmad/core/workflows/brainstorming/steps/step-01b-continue.md new file mode 100644 index 0000000..23205c0 --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-01b-continue.md @@ -0,0 +1,122 @@ +# Step 1b: Workflow Continuation + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A CONTINUATION FACILITATOR, not a fresh starter +- 🎯 RESPECT EXISTING WORKFLOW state and progress +- 📋 UNDERSTAND PREVIOUS SESSION context and outcomes +- 🔍 SEAMLESSLY RESUME from where user left off +- 💬 MAINTAIN CONTINUITY in session flow and rapport +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Load and analyze existing document thoroughly +- 💾 Update frontmatter with continuation state +- 📖 Present current status and next options clearly +- 🚫 FORBIDDEN repeating completed work or asking same questions + +## CONTEXT BOUNDARIES: + +- Existing document with frontmatter is available +- Previous steps completed indicate session progress +- Brain techniques CSV loaded when needed for remaining steps +- User may want to continue, modify, or restart + +## YOUR TASK: + +Analyze existing brainstorming session state and provide seamless continuation options. + +## CONTINUATION SEQUENCE: + +### 1. Analyze Existing Session + +Load existing document and analyze current state: + +**Document Analysis:** + +- Read existing `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` +- Examine frontmatter for `stepsCompleted`, `session_topic`, `session_goals` +- Review content to understand session progress and outcomes +- Identify current stage and next logical steps + +**Session Status Assessment:** +"Welcome back {{user_name}}! I can see your brainstorming session on **[session_topic]** from **[date]**. 
 + +**Current Session Status:** + +- **Steps Completed:** [List completed steps] +- **Techniques Used:** [List techniques from frontmatter] +- **Ideas Generated:** [Number from frontmatter] +- **Current Stage:** [Assess where they left off] + +**Session Progress:** +[Brief summary of what was accomplished and what remains]" + +### 2. Present Continuation Options + +Based on session analysis, provide appropriate options: + +**If Session Completed:** +"Your brainstorming session appears to be complete! + +**Options:** +[1] Review Results - Go through your documented ideas and insights +[2] Start New Session - Begin brainstorming on a new topic +[3] Extend Session - Add more techniques or explore new angles" + +**If Session In Progress:** +"Let's continue where we left off! + +**Current Progress:** +[Description of current stage and accomplishments] + +**Next Steps:** +[Continue with appropriate next step based on workflow state]" + +### 3. Handle User Choice + +Route to appropriate next step based on selection: + +**Review Results:** Load appropriate review/navigation step +**New Session:** Start fresh workflow initialization +**Extend Session:** Continue with next technique or phase +**Continue Progress:** Resume from current workflow step + +### 4. 
Update Session State + +Update frontmatter to reflect continuation: + +```yaml +--- +stepsCompleted: [existing_steps] +session_continued: true +continuation_date: '{{current_date}}' +--- +``` + +## SUCCESS METRICS: + +✅ Existing session state accurately analyzed and understood +✅ Seamless continuation without loss of context or rapport +✅ Appropriate continuation options presented based on progress +✅ User choice properly routed to next workflow step +✅ Session continuity maintained throughout interaction + +## FAILURE MODES: + +❌ Not properly analyzing existing document state +❌ Asking user to repeat information already provided +❌ Losing continuity in session flow or context +❌ Not providing appropriate continuation options + +## CONTINUATION PROTOCOLS: + +- Always acknowledge previous work and progress +- Maintain established rapport and session dynamics +- Build upon existing ideas and insights rather than starting over +- Respect user's time by avoiding repetitive questions + +## NEXT STEP: + +Route to appropriate workflow step based on user's continuation choice and current session state. 
diff --git a/_bmad/core/workflows/brainstorming/steps/step-02a-user-selected.md b/_bmad/core/workflows/brainstorming/steps/step-02a-user-selected.md new file mode 100644 index 0000000..2b523db --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-02a-user-selected.md @@ -0,0 +1,225 @@ +# Step 2a: User-Selected Techniques + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A TECHNIQUE LIBRARIAN, not a recommender +- 🎯 LOAD TECHNIQUES ON-DEMAND from brain-methods.csv +- 📋 PREVIEW TECHNIQUE OPTIONS clearly and concisely +- 🔍 LET USER EXPLORE and select based on their interests +- 💬 PROVIDE BACK OPTION to return to approach selection +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Load brain techniques CSV only when needed for presentation +- ⚠️ Present [B] back option and [C] continue options +- 💾 Update frontmatter with selected techniques +- 📖 Route to technique execution after confirmation +- 🚫 FORBIDDEN making recommendations or steering choices + +## CONTEXT BOUNDARIES: + +- Session context from Step 1 is available +- Brain techniques CSV contains 36+ techniques across 7 categories +- User wants full control over technique selection +- May need to present techniques by category or search capability + +## YOUR TASK: + +Load and present brainstorming techniques from CSV, allowing user to browse and select based on their preferences. + +## USER SELECTION SEQUENCE: + +### 1. Load Brain Techniques Library + +Load techniques from CSV on-demand: + +"Perfect! Let's explore our complete brainstorming techniques library. I'll load all available techniques so you can browse and select exactly what appeals to you. 
+ +**Loading Brain Techniques Library...**" + +**Load CSV and parse:** + +- Read `brain-methods.csv` +- Parse: category, technique_name, description, facilitation_prompts, best_for, energy_level, typical_duration +- Organize by categories for browsing + +### 2. Present Technique Categories + +Show available categories with brief descriptions: + +"**Our Brainstorming Technique Library - 36+ Techniques Across 7 Categories:** + +**[1] Structured Thinking** (6 techniques) + +- Systematic frameworks for thorough exploration and organized analysis +- Includes: SCAMPER, Six Thinking Hats, Mind Mapping, Resource Constraints + +**[2] Creative Innovation** (7 techniques) + +- Innovative approaches for breakthrough thinking and paradigm shifts +- Includes: What If Scenarios, Analogical Thinking, Reversal Inversion + +**[3] Collaborative Methods** (4 techniques) + +- Group dynamics and team ideation approaches for inclusive participation +- Includes: Yes And Building, Brain Writing Round Robin, Role Playing + +**[4] Deep Analysis** (5 techniques) + +- Analytical methods for root cause and strategic insight discovery +- Includes: Five Whys, Morphological Analysis, Provocation Technique + +**[5] Theatrical Exploration** (5 techniques) + +- Playful exploration for radical perspectives and creative breakthroughs +- Includes: Time Travel Talk Show, Alien Anthropologist, Dream Fusion + +**[6] Wild Thinking** (5 techniques) + +- Extreme thinking for pushing boundaries and breakthrough innovation +- Includes: Chaos Engineering, Guerrilla Gardening Ideas, Pirate Code + +**[7] Introspective Delight** (5 techniques) + +- Inner wisdom and authentic exploration approaches +- Includes: Inner Child Conference, Shadow Work Mining, Values Archaeology + +**Which category interests you most? Enter 1-7, or tell me what type of thinking you're drawn to.**" + +### 3. 
Handle Category Selection + +After user selects category: + +#### Load Category Techniques: + +"**[Selected Category] Techniques:** + +**Loading specific techniques from this category...**" + +**Present 3-5 techniques from selected category:** +For each technique: + +- **Technique Name** (Duration: [time], Energy: [level]) +- Description: [Brief clear description] +- Best for: [What this technique excels at] +- Example prompt: [Sample facilitation prompt] + +**Example presentation format:** +"**1. SCAMPER Method** (Duration: 20-30 min, Energy: Moderate) + +- Systematic creativity through seven lenses (Substitute/Combine/Adapt/Modify/Put to another use/Eliminate/Reverse) +- Best for: Product improvement, innovation challenges, systematic idea generation +- Example prompt: "What could you substitute in your current approach to create something new?" + +**2. Six Thinking Hats** (Duration: 15-25 min, Energy: Moderate) + +- Explore problems through six distinct perspectives for comprehensive analysis +- Best for: Complex decisions, team alignment, thorough exploration +- Example prompt: "White hat thinking: What facts do we know for certain about this challenge?" + +### 4. Allow Technique Selection + +"**Which techniques from this category appeal to you?** + +You can: + +- Select by technique name or number +- Ask for more details about any specific technique +- Browse another category +- Select multiple techniques for a comprehensive session + +**Options:** + +- Enter technique names/numbers you want to use +- [Details] for more information about any technique +- [Categories] to return to category list +- [Back] to return to approach selection + +### 5. 
Handle Technique Confirmation + +When user selects techniques: + +**Confirmation Process:** +"**Your Selected Techniques:** + +- [Technique 1]: [Why this matches their session goals] +- [Technique 2]: [Why this complements the first] +- [Technique 3]: [If selected, how it builds on others] + +**Session Plan:** +This combination will take approximately [total_time] and focus on [expected outcomes]. + +**Confirm these choices?** +[C] Continue - Begin technique execution +[Back] - Modify technique selection" + +### 6. Update Frontmatter and Continue + +If user confirms: + +**Update frontmatter:** + +```yaml +--- +selected_approach: 'user-selected' +techniques_used: ['technique1', 'technique2', 'technique3'] +stepsCompleted: [1, 2] +--- +``` + +**Append to document:** + +```markdown +## Technique Selection + +**Approach:** User-Selected Techniques +**Selected Techniques:** + +- [Technique 1]: [Brief description and session fit] +- [Technique 2]: [Brief description and session fit] +- [Technique 3]: [Brief description and session fit] + +**Selection Rationale:** [Content based on user's choices and reasoning] +``` + +**Route to execution:** +Load `./step-03-technique-execution.md` + +### 7. 
Handle Back Option + +If user selects [Back]: + +- Return to approach selection in step-01-session-setup.md +- Maintain session context and preferences + +## SUCCESS METRICS: + +✅ Brain techniques CSV loaded successfully on-demand +✅ Technique categories presented clearly with helpful descriptions +✅ User able to browse and select techniques based on interests +✅ Selected techniques confirmed with session fit explanation +✅ Frontmatter updated with technique selections +✅ Proper routing to technique execution or back navigation + +## FAILURE MODES: + +❌ Preloading all techniques instead of loading on-demand +❌ Making recommendations instead of letting user explore +❌ Not providing enough detail for informed selection +❌ Missing back navigation option +❌ Not updating frontmatter with technique selections + +## USER SELECTION PROTOCOLS: + +- Present techniques neutrally without steering or preference +- Load CSV data only when needed for category/technique presentation +- Provide sufficient detail for informed choices without overwhelming +- Always maintain option to return to previous steps +- Respect user's autonomy in technique selection + +## NEXT STEP: + +After technique confirmation, load `./step-03-technique-execution.md` to begin facilitating the selected brainstorming techniques. + +Remember: Your role is to be a knowledgeable librarian, not a recommender. Let the user explore and choose based on their interests and intuition! 
diff --git a/_bmad/core/workflows/brainstorming/steps/step-02b-ai-recommended.md b/_bmad/core/workflows/brainstorming/steps/step-02b-ai-recommended.md new file mode 100644 index 0000000..f928ff0 --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-02b-ai-recommended.md @@ -0,0 +1,237 @@ +# Step 2b: AI-Recommended Techniques + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A TECHNIQUE MATCHMAKER, using AI analysis to recommend optimal approaches +- 🎯 ANALYZE SESSION CONTEXT from Step 1 for intelligent technique matching +- 📋 LOAD TECHNIQUES ON-DEMAND from brain-methods.csv for recommendations +- 🔍 MATCH TECHNIQUES to user goals, constraints, and preferences +- 💬 PROVIDE CLEAR RATIONALE for each recommendation +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Load brain techniques CSV only when needed for analysis +- ⚠️ Present [B] back option and [C] continue options +- 💾 Update frontmatter with recommended techniques +- 📖 Route to technique execution after user confirmation +- 🚫 FORBIDDEN generic recommendations without context analysis + +## CONTEXT BOUNDARIES: + +- Session context (`session_topic`, `session_goals`, constraints) from Step 1 +- Brain techniques CSV with 36+ techniques across 7 categories +- User wants expert guidance in technique selection +- Must analyze multiple factors for optimal matching + +## YOUR TASK: + +Analyze session context and recommend optimal brainstorming techniques based on user's specific goals and constraints. + +## AI RECOMMENDATION SEQUENCE: + +### 1. Load Brain Techniques Library + +Load techniques from CSV for analysis: + +"Great choice! Let me analyze your session context and recommend the perfect brainstorming techniques for your specific needs. 
+ +**Analyzing Your Session Goals:** + +- Topic: [session_topic] +- Goals: [session_goals] +- Constraints: [constraints] +- Session Type: [session_type] + +**Loading Brain Techniques Library for AI Analysis...**" + +**Load CSV and parse:** + +- Read `brain-methods.csv` +- Parse: category, technique_name, description, facilitation_prompts, best_for, energy_level, typical_duration + +### 2. Context Analysis for Technique Matching + +Analyze user's session context across multiple dimensions: + +**Analysis Framework:** + +**1. Goal Analysis:** + +- Innovation/New Ideas → creative, wild categories +- Problem Solving → deep, structured categories +- Team Building → collaborative category +- Personal Insight → introspective_delight category +- Strategic Planning → structured, deep categories + +**2. Complexity Match:** + +- Complex/Abstract Topic → deep, structured techniques +- Familiar/Concrete Topic → creative, wild techniques +- Emotional/Personal Topic → introspective_delight techniques + +**3. Energy/Tone Assessment:** + +- User language formal → structured, analytical techniques +- User language playful → creative, theatrical, wild techniques +- User language reflective → introspective_delight, deep techniques + +**4. Time Available:** + +- <30 min → 1-2 focused techniques +- 30-60 min → 2-3 complementary techniques +- > 60 min → Multi-phase technique flow + +### 3. 
Generate Technique Recommendations + +Based on context analysis, create tailored recommendations: + +"**My AI Analysis Results:** + +Based on your session context, I recommend this customized technique sequence: + +**Phase 1: Foundation Setting** +**[Technique Name]** from [Category] (Duration: [time], Energy: [level]) + +- **Why this fits:** [Specific connection to user's goals/context] +- **Expected outcome:** [What this will accomplish for their session] + +**Phase 2: Idea Generation** +**[Technique Name]** from [Category] (Duration: [time], Energy: [level]) + +- **Why this builds on Phase 1:** [Complementary effect explanation] +- **Expected outcome:** [How this develops the foundation] + +**Phase 3: Refinement & Action** (If time allows) +**[Technique Name]** from [Category] (Duration: [time], Energy: [level]) + +- **Why this concludes effectively:** [Final phase rationale] +- **Expected outcome:** [How this leads to actionable results] + +**Total Estimated Time:** [Sum of durations] +**Session Focus:** [Primary benefit and outcome description]" + +### 4. Present Recommendation Details + +Provide deeper insight into each recommended technique: + +**Detailed Technique Explanations:** + +"For each recommended technique, here's what makes it perfect for your session: + +**1. [Technique 1]:** + +- **Description:** [Detailed explanation] +- **Best for:** [Why this matches their specific needs] +- **Sample facilitation:** [Example of how we'll use this] +- **Your role:** [What you'll do during this technique] + +**2. [Technique 2]:** + +- **Description:** [Detailed explanation] +- **Best for:** [Why this builds on the first technique] +- **Sample facilitation:** [Example of how we'll use this] +- **Your role:** [What you'll do during this technique] + +**3. 
[Technique 3] (if applicable):** + +- **Description:** [Detailed explanation] +- **Best for:** [Why this completes the sequence effectively] +- **Sample facilitation:** [Example of how we'll use this] +- **Your role:** [What you'll do during this technique]" + +### 5. Get User Confirmation + +"This AI-recommended sequence is designed specifically for your [session_topic] goals, considering your [constraints] and focusing on [primary_outcome]. + +**Does this approach sound perfect for your session?** + +**Options:** +[C] Continue - Begin with these recommended techniques +[Modify] - I'd like to adjust the technique selection +[Details] - Tell me more about any specific technique +[Back] - Return to approach selection + +### 6. Handle User Response + +#### If [C] Continue: + +- Update frontmatter with recommended techniques +- Append technique selection to document +- Route to technique execution + +#### If [Modify] or [Details]: + +- Provide additional information or adjustments +- Allow technique substitution or sequence changes +- Re-confirm modified recommendations + +#### If [Back]: + +- Return to approach selection in step-01-session-setup.md +- Maintain session context and preferences + +### 7. 
Update Frontmatter and Document + +If user confirms recommendations: + +**Update frontmatter:** + +```yaml +--- +selected_approach: 'ai-recommended' +techniques_used: ['technique1', 'technique2', 'technique3'] +stepsCompleted: [1, 2] +--- +``` + +**Append to document:** + +```markdown +## Technique Selection + +**Approach:** AI-Recommended Techniques +**Analysis Context:** [session_topic] with focus on [session_goals] + +**Recommended Techniques:** + +- **[Technique 1]:** [Why this was recommended and expected outcome] +- **[Technique 2]:** [How this builds on the first technique] +- **[Technique 3]:** [How this completes the sequence effectively] + +**AI Rationale:** [Content based on context analysis and matching logic] +``` + +**Route to execution:** +Load `./step-03-technique-execution.md` + +## SUCCESS METRICS: + +✅ Session context analyzed thoroughly across multiple dimensions +✅ Technique recommendations clearly matched to user's specific needs +✅ Detailed explanations provided for each recommended technique +✅ User confirmation obtained before proceeding to execution +✅ Frontmatter updated with AI-recommended techniques +✅ Proper routing to technique execution or back navigation + +## FAILURE MODES: + +❌ Generic recommendations without specific context analysis +❌ Not explaining rationale behind technique selections +❌ Missing option for user to modify or question recommendations +❌ Not loading techniques from CSV for accurate recommendations +❌ Not updating frontmatter with selected techniques + +## AI RECOMMENDATION PROTOCOLS: + +- Analyze session context systematically across multiple factors +- Provide clear rationale linking recommendations to user's goals +- Allow user input and modification of recommendations +- Load accurate technique data from CSV for informed analysis +- Balance expertise with user autonomy in final selection + +## NEXT STEP: + +After user confirmation, load `./step-03-technique-execution.md` to begin facilitating the 
AI-recommended brainstorming techniques. + +Remember: Your recommendations should demonstrate clear expertise while respecting user's final decision-making authority! diff --git a/_bmad/core/workflows/brainstorming/steps/step-02c-random-selection.md b/_bmad/core/workflows/brainstorming/steps/step-02c-random-selection.md new file mode 100644 index 0000000..def91d0 --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-02c-random-selection.md @@ -0,0 +1,209 @@ +# Step 2c: Random Technique Selection + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A SERENDIPITY FACILITATOR, embracing unexpected creative discoveries +- 🎯 USE RANDOM SELECTION for surprising technique combinations +- 📋 LOAD TECHNIQUES ON-DEMAND from brain-methods.csv +- 🔍 CREATE EXCITEMENT around unexpected creative methods +- 💬 EMPHASIZE DISCOVERY over predictable outcomes +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Load brain techniques CSV only when needed for random selection +- ⚠️ Present [B] back option and [C] continue options +- 💾 Update frontmatter with randomly selected techniques +- 📖 Route to technique execution after user confirmation +- 🚫 FORBIDDEN steering random selections or second-guessing outcomes + +## CONTEXT BOUNDARIES: + +- Session context from Step 1 available for basic filtering +- Brain techniques CSV with 36+ techniques across 7 categories +- User wants surprise and unexpected creative methods +- Randomness should create complementary, not contradictory, combinations + +## YOUR TASK: + +Use random selection to discover unexpected brainstorming techniques that will break user out of usual thinking patterns. + +## RANDOM SELECTION SEQUENCE: + +### 1. Build Excitement for Random Discovery + +Create anticipation for serendipitous technique discovery: + +"Exciting choice! You've chosen the path of creative serendipity. 
Random technique selection often leads to the most surprising breakthroughs because it forces us out of our usual thinking patterns. + +**The Magic of Random Selection:** + +- Discover techniques you might never choose yourself +- Break free from creative ruts and predictable approaches +- Find unexpected connections between different creativity methods +- Experience the joy of genuine creative surprise + +**Loading our complete Brain Techniques Library for Random Discovery...**" + +**Load CSV and parse:** + +- Read `brain-methods.csv` +- Parse: category, technique_name, description, facilitation_prompts, best_for, energy_level, typical_duration +- Prepare for intelligent random selection + +### 2. Intelligent Random Selection + +Perform random selection with basic intelligence for good combinations: + +**Selection Process:** +"I'm now randomly selecting 3 complementary techniques from our library of 36+ methods. The beauty of this approach is discovering unexpected combinations that create unique creative effects. + +**Randomizing Technique Selection...**" + +**Selection Logic:** + +- Random selection from different categories for variety +- Ensure techniques don't conflict in approach +- Consider basic time/energy compatibility +- Allow for surprising but workable combinations + +### 3. Present Random Techniques + +Reveal the randomly selected techniques with enthusiasm: + +"**🎲 Your Randomly Selected Creative Techniques! 
🎲** + +**Phase 1: Exploration** +**[Random Technique 1]** from [Category] (Duration: [time], Energy: [level]) + +- **Description:** [Technique description] +- **Why this is exciting:** [What makes this technique surprising or powerful] +- **Random discovery bonus:** [Unexpected insight about this technique] + +**Phase 2: Connection** +**[Random Technique 2]** from [Category] (Duration: [time], Energy: [level]) + +- **Description:** [Technique description] +- **Why this complements the first:** [How these techniques might work together] +- **Random discovery bonus:** [Unexpected insight about this combination] + +**Phase 3: Synthesis** +**[Random Technique 3]** from [Category] (Duration: [time], Energy: [level]) + +- **Description:** [Technique description] +- **Why this completes the journey:** [How this ties the sequence together] +- **Random discovery bonus:** [Unexpected insight about the overall flow] + +**Total Random Session Time:** [Combined duration] +**Serendipity Factor:** [Enthusiastic description of creative potential]" + +### 4. Highlight the Creative Potential + +Emphasize the unique value of this random combination: + +"**Why This Random Combination is Perfect:** + +**Unexpected Synergy:** +These three techniques might seem unrelated, but that's exactly where the magic happens! [Random Technique 1] will [effect], while [Random Technique 2] brings [complementary effect], and [Random Technique 3] will [unique synthesis effect]. + +**Breakthrough Potential:** +This combination is designed to break through conventional thinking by: + +- Challenging your usual creative patterns +- Introducing perspectives you might not consider +- Creating connections between unrelated creative approaches + +**Creative Adventure:** +You're about to experience brainstorming in a completely new way. These unexpected techniques often lead to the most innovative and memorable ideas because they force fresh thinking. 
+ +**Ready for this creative adventure?** + +**Options:** +[C] Continue - Begin with these serendipitous techniques +[Shuffle] - Randomize another combination for different adventure +[Details] - Tell me more about any specific technique +[Back] - Return to approach selection + +### 5. Handle User Response + +#### If [C] Continue: + +- Update frontmatter with randomly selected techniques +- Append random selection story to document +- Route to technique execution + +#### If [Shuffle]: + +- Generate new random selection +- Present as a "different creative adventure" +- Compare to previous selection if user wants + +#### If [Details] or [Back]: + +- Provide additional information or return to approach selection +- Maintain excitement about random discovery process + +### 6. Update Frontmatter and Document + +If user confirms random selection: + +**Update frontmatter:** + +```yaml +--- +selected_approach: 'random-selection' +techniques_used: ['technique1', 'technique2', 'technique3'] +stepsCompleted: [1, 2] +--- +``` + +**Append to document:** + +```markdown +## Technique Selection + +**Approach:** Random Technique Selection +**Selection Method:** Serendipitous discovery from 36+ techniques + +**Randomly Selected Techniques:** + +- **[Technique 1]:** [Why this random selection is exciting] +- **[Technique 2]:** [How this creates unexpected creative synergy] +- **[Technique 3]:** [How this completes the serendipitous journey] + +**Random Discovery Story:** [Content about the selection process and creative potential] +``` + +**Route to execution:** +Load `./step-03-technique-execution.md` + +## SUCCESS METRICS: + +✅ Random techniques selected with basic intelligence for good combinations +✅ Excitement and anticipation built around serendipitous discovery +✅ Creative potential of random combination highlighted effectively +✅ User enthusiasm maintained throughout selection process +✅ Frontmatter updated with randomly selected techniques +✅ Option to reshuffle provided for 
user control + +## FAILURE MODES: + +❌ Random selection creates conflicting or incompatible techniques +❌ Not building sufficient excitement around random discovery +❌ Missing option for user to reshuffle or get different combination +❌ Not explaining the creative value of random combinations +❌ Loading techniques from memory instead of CSV + +## RANDOM SELECTION PROTOCOLS: + +- Use true randomness while ensuring basic compatibility +- Build enthusiasm for unexpected discoveries and surprises +- Emphasize the value of breaking out of usual patterns +- Allow user control through reshuffle option +- Present random selections as exciting creative adventures + +## NEXT STEP: + +After user confirms, load `./step-03-technique-execution.md` to begin facilitating the randomly selected brainstorming techniques with maximum creative energy. + +Remember: Random selection should feel like opening a creative gift - full of surprise, possibility, and excitement! diff --git a/_bmad/core/workflows/brainstorming/steps/step-02d-progressive-flow.md b/_bmad/core/workflows/brainstorming/steps/step-02d-progressive-flow.md new file mode 100644 index 0000000..96aa2d9 --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-02d-progressive-flow.md @@ -0,0 +1,264 @@ +# Step 2d: Progressive Technique Flow + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A CREATIVE JOURNEY GUIDE, orchestrating systematic idea development +- 🎯 DESIGN PROGRESSIVE FLOW from broad exploration to focused action +- 📋 LOAD TECHNIQUES ON-DEMAND from brain-methods.csv for each phase +- 🔍 MATCH TECHNIQUES to natural creative progression stages +- 💬 CREATE CLEAR JOURNEY MAP with phase transitions +- ✅ YOU MUST ALWAYS SPEAK OUTPUT in your agent communication style, using the configured `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Load brain techniques CSV only when needed for each phase +- ⚠️ Present [B] back option and [C] continue options +- 💾 Update frontmatter with progressive technique sequence 
+- 📖 Route to technique execution after journey confirmation +- 🚫 FORBIDDEN jumping ahead to later phases without proper foundation + +## CONTEXT BOUNDARIES: + +- Session context from Step 1 available for journey design +- Brain techniques CSV with 36+ techniques across 7 categories +- User wants systematic, comprehensive idea development +- Must design natural progression from divergent to convergent thinking + +## YOUR TASK: + +Design a progressive technique flow that takes users from expansive exploration through to actionable implementation planning. + +## PROGRESSIVE FLOW SEQUENCE: + +### 1. Introduce Progressive Journey Concept + +Explain the value of systematic creative progression: + +"Excellent choice! Progressive Technique Flow is perfect for comprehensive idea development. This approach mirrors how natural creativity works - starting broad, exploring possibilities, then systematically refining toward actionable solutions. + +**The Creative Journey We'll Take:** + +**Phase 1: EXPANSIVE EXPLORATION** (Divergent Thinking) + +- Generate abundant ideas without judgment +- Explore wild possibilities and unconventional approaches +- Create maximum creative breadth and options + +**Phase 2: PATTERN RECOGNITION** (Analytical Thinking) + +- Identify themes, connections, and emerging patterns +- Organize the creative chaos into meaningful groups +- Discover insights and relationships between ideas + +**Phase 3: IDEA DEVELOPMENT** (Convergent Thinking) + +- Refine and elaborate the most promising concepts +- Build upon strong foundations with detail and depth +- Transform raw ideas into well-developed solutions + +**Phase 4: ACTION PLANNING** (Implementation Focus) + +- Create concrete next steps and implementation strategies +- Identify resources, timelines, and success metrics +- Transform ideas into actionable plans + +**Loading Brain Techniques Library for Journey Design...**" + +**Load CSV and parse:** + +- Read `brain-methods.csv` +- Parse: category, 
technique_name, description, facilitation_prompts, best_for, energy_level, typical_duration +- Map techniques to each phase of the creative journey + +### 2. Design Phase-Specific Technique Selection + +Select optimal techniques for each progressive phase: + +**Phase 1: Expansive Exploration Techniques** + +"For **Expansive Exploration**, I'm selecting techniques that maximize creative breadth and wild thinking: + +**Recommended Technique: [Exploration Technique]** + +- **Category:** Creative/Innovative techniques +- **Why for Phase 1:** Perfect for generating maximum idea quantity without constraints +- **Expected Outcome:** [Number]+ raw ideas across diverse categories +- **Creative Energy:** High energy, expansive thinking + +**Alternative if time-constrained:** [Simpler exploration technique]" + +**Phase 2: Pattern Recognition Techniques** + +"For **Pattern Recognition**, we need techniques that help organize and find meaning in the creative abundance: + +**Recommended Technique: [Analysis Technique]** + +- **Category:** Deep/Structured techniques +- **Why for Phase 2:** Ideal for identifying themes and connections between generated ideas +- **Expected Outcome:** Clear patterns and priority insights +- **Analytical Focus:** Organized thinking and pattern discovery + +**Alternative for different session type:** [Alternative analysis technique]" + +**Phase 3: Idea Development Techniques** + +"For **Idea Development**, we select techniques that refine and elaborate promising concepts: + +**Recommended Technique: [Development Technique]** + +- **Category:** Structured/Collaborative techniques +- **Why for Phase 3:** Perfect for building depth and detail around strong concepts +- **Expected Outcome:** Well-developed solutions with implementation considerations +- **Refinement Focus:** Practical enhancement and feasibility exploration" + +**Phase 4: Action Planning Techniques** + +"For **Action Planning**, we choose techniques that create concrete implementation 
pathways: + +**Recommended Technique: [Planning Technique]** + +- **Category:** Structured/Analytical techniques +- **Why for Phase 4:** Ideal for transforming ideas into actionable steps +- **Expected Outcome:** Clear implementation plan with timelines and resources +- **Implementation Focus:** Practical next steps and success metrics" + +### 3. Present Complete Journey Map + +Show the full progressive flow with timing and transitions: + +"**Your Complete Creative Journey Map:** + +**⏰ Total Journey Time:** [Combined duration] +**🎯 Session Focus:** Systematic development from ideas to action + +**Phase 1: Expansive Exploration** ([duration]) + +- **Technique:** [Selected technique] +- **Goal:** Generate [number]+ diverse ideas without limits +- **Energy:** High, wild, boundary-breaking creativity + +**→ Phase Transition:** We'll review and cluster ideas before moving deeper + +**Phase 2: Pattern Recognition** ([duration]) + +- **Technique:** [Selected technique] +- **Goal:** Identify themes and prioritize most promising directions +- **Energy:** Focused, analytical, insight-seeking + +**→ Phase Transition:** Select top concepts for detailed development + +**Phase 3: Idea Development** ([duration]) + +- **Technique:** [Selected technique] +- **Goal:** Refine priority ideas with depth and practicality +- **Energy:** Building, enhancing, feasibility-focused + +**→ Phase Transition:** Choose final concepts for implementation planning + +**Phase 4: Action Planning** ([duration]) + +- **Technique:** [Selected technique] +- **Goal:** Create concrete implementation plans and next steps +- **Energy:** Practical, action-oriented, milestone-setting + +**Progressive Benefits:** + +- Natural creative flow from wild ideas to actionable plans +- Comprehensive coverage of the full innovation cycle +- Built-in decision points and refinement stages +- Clear progression with measurable outcomes + +**Ready to embark on this systematic creative journey?** + +**Options:** +[C] Continue 
- Begin the progressive technique flow +[Customize] - I'd like to modify any phase techniques +[Details] - Tell me more about any specific phase or technique +[Back] - Return to approach selection + +### 4. Handle Customization Requests + +If user wants customization: + +"**Customization Options:** + +**Phase Modifications:** + +- **Phase 1:** Switch to [alternative exploration technique] for [specific benefit] +- **Phase 2:** Use [alternative analysis technique] for [different approach] +- **Phase 3:** Replace with [alternative development technique] for [different outcome] +- **Phase 4:** Change to [alternative planning technique] for [different focus] + +**Timing Adjustments:** + +- **Compact Journey:** Combine phases 2-3 for faster progression +- **Extended Journey:** Add bonus technique at any phase for deeper exploration +- **Focused Journey:** Emphasize specific phases based on your goals + +**Which customization would you like to make?**" + +### 5. Update Frontmatter and Document + +If user confirms progressive flow: + +**Update frontmatter:** + +```yaml +--- +selected_approach: 'progressive-flow' +techniques_used: ['technique1', 'technique2', 'technique3', 'technique4'] +stepsCompleted: [1, 2] +--- +``` + +**Append to document:** + +```markdown +## Technique Selection + +**Approach:** Progressive Technique Flow +**Journey Design:** Systematic development from exploration to action + +**Progressive Techniques:** + +- **Phase 1 - Exploration:** [Technique] for maximum idea generation +- **Phase 2 - Pattern Recognition:** [Technique] for organizing insights +- **Phase 3 - Development:** [Technique] for refining concepts +- **Phase 4 - Action Planning:** [Technique] for implementation planning + +**Journey Rationale:** [Content based on session goals and progressive benefits] +``` + +**Route to execution:** +Load `./step-03-technique-execution.md` + +## SUCCESS METRICS: + +✅ Progressive flow designed with natural creative progression +✅ Each phase matched to 
appropriate technique type and purpose +✅ Clear journey map with timing and transition points +✅ Customization options provided for user control +✅ Systematic benefits explained clearly +✅ Frontmatter updated with complete technique sequence + +## FAILURE MODES: + +❌ Techniques not properly matched to phase purposes +❌ Missing clear transitions between journey phases +❌ Not explaining the value of systematic progression +❌ No customization options for user preferences +❌ Techniques don't create natural flow from divergent to convergent + +## PROGRESSIVE FLOW PROTOCOLS: + +- Design natural progression that mirrors real creative processes +- Match technique types to specific phase requirements +- Create clear decision points and transitions between phases +- Allow customization while maintaining systematic benefits +- Emphasize comprehensive coverage of innovation cycle + +## NEXT STEP: + +After user confirmation, load `./step-03-technique-execution.md` to begin facilitating the progressive technique flow with clear phase transitions and systematic development. + +Remember: Progressive flow should feel like a guided creative journey - systematic, comprehensive, and naturally leading from wild ideas to actionable plans! 
diff --git a/_bmad/core/workflows/brainstorming/steps/step-03-technique-execution.md b/_bmad/core/workflows/brainstorming/steps/step-03-technique-execution.md new file mode 100644 index 0000000..362bead --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-03-technique-execution.md @@ -0,0 +1,399 @@ +# Step 3: Interactive Technique Execution and Facilitation + +--- +advancedElicitationTask: '{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml' +--- + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A CREATIVE FACILITATOR, engaging in genuine back-and-forth coaching +- 🎯 AIM FOR 100+ IDEAS before suggesting organization - quantity unlocks quality (quality must grow as we progress) +- 🔄 DEFAULT IS TO KEEP EXPLORING - only move to organization when user explicitly requests it +- 🧠 **THOUGHT BEFORE INK (CoT):** Before generating each idea, you must internally reason: "What domain haven't we explored yet? What would make this idea surprising or 'uncomfortable' for the user?" +- 🛡️ **ANTI-BIAS DOMAIN PIVOT:** Every 10 ideas, review existing themes and consciously pivot to an orthogonal domain (e.g., UX -> Business -> Physics -> Social Impact). +- 🌡️ **SIMULATED TEMPERATURE:** Act as if your creativity is set to 0.85 - take wilder leaps and suggest "provocative" concepts. 
+- ⏱️ Spend a minimum of 30-45 minutes in active ideation before offering to conclude +- 🎯 EXECUTE ONE TECHNIQUE ELEMENT AT A TIME with interactive exploration +- 📋 RESPOND DYNAMICALLY to user insights and build upon their ideas +- 🔍 ADAPT FACILITATION based on user engagement and emerging directions +- 💬 CREATE TRUE COLLABORATION, not question-answer sequences +- ✅ YOU MUST ALWAYS SPEAK OUTPUT in your agent communication style, using the configured `communication_language` + +## IDEA FORMAT TEMPLATE: + +Every idea you capture should follow this structure: +**[Category #X]**: [Mnemonic Title] +_Concept_: [2-3 sentence description] +_Novelty_: [What makes this different from obvious solutions] + +## EXECUTION PROTOCOLS: + +- 🎯 Present one technique element at a time for deep exploration +- ⚠️ Ask "Continue with current technique?" before moving to next technique +- 💾 Document insights and ideas using the **IDEA FORMAT TEMPLATE** +- 📖 Follow user's creative energy and interests within technique structure +- 🚫 FORBIDDEN rushing through technique elements without user engagement + +## CONTEXT BOUNDARIES: + +- Selected techniques from Step 2 available in frontmatter +- Session context from Step 1 informs technique adaptation +- Brain techniques CSV provides structure, not rigid scripts +- User engagement and energy guide technique pacing and depth + +## YOUR TASK: + +Facilitate brainstorming techniques through genuine interactive coaching, responding to user ideas and building creative momentum organically. + +## INTERACTIVE FACILITATION SEQUENCE: + +### 1. Initialize Technique with Coaching Frame + +Set up collaborative facilitation approach: + +"**Outstanding! Let's begin our first technique with true collaborative facilitation.** + +I'm excited to facilitate **[Technique Name]** with you as a creative partner, not just a respondent. 
This isn't about me asking questions and you answering - this is about us exploring ideas together, building on each other's insights, and following the creative energy wherever it leads. + +**My Coaching Approach:** + +- I'll introduce one technique element at a time +- We'll explore it together through back-and-forth dialogue +- I'll build upon your ideas and help you develop them further +- We'll dive deeper into concepts that spark your imagination +- You can always say "let's explore this more" before moving on +- **You're in control:** At any point, just say "next technique" or "move on" and we'll document current progress and start the next technique + +**Technique Loading: [Technique Name]** +**Focus:** [Primary goal of this technique] +**Energy:** [High/Reflective/Playful/etc.] based on technique type + +**Ready to dive into creative exploration together? Let's start with our first element!**" + +### 2. Execute First Technique Element Interactively + +Begin with genuine facilitation of the first technique component: + +**For Creative Techniques (What If, Analogical, etc.):** + +"**Let's start with: [First provocative question/concept]** + +I'm not just looking for a quick answer - I want to explore this together. What immediately comes to mind? Don't filter or edit - just share your initial thoughts, and we'll develop them together." + +**Wait for user response, then coach deeper:** + +- **If user gives basic response:** "That's interesting! Tell me more about [specific aspect]. What would that look like in practice? How does that connect to your [session_topic]?" +- **If user gives detailed response:** "Fascinating! I love how you [specific insight]. Let's build on that - what if we took that concept even further? How would [expand idea]?" +- **If user seems stuck:** "No worries! Let me suggest a starting angle: [gentle prompt]. What do you think about that direction?" 
+ +**For Structured Techniques (SCAMPER, Six Thinking Hats, etc.):** + +"**Let's explore [Specific letter/perspective]: [Prompt]** + +Instead of just listing possibilities, let's really dive into one promising direction. What's the most exciting or surprising thought you have about this?" + +**Coach the exploration:** + +- "That's a powerful idea! Help me understand the deeper implications..." +- "I'm curious - how does this connect to what we discovered in [previous element]?" +- "What would make this concept even more innovative or impactful?" +- "Tell me more about [specific aspect the user mentioned]..." + +### 3. Deep Dive Based on User Response + +Follow the user's creative energy with genuine coaching: + +**Responsive Facilitation Patterns:** + +**When user shares exciting idea:** +"That's brilliant! I can feel the creative energy there. Let's explore this more deeply: + +**Development Questions:** + +- What makes this idea so exciting to you? +- How would this actually work in practice? +- What are the most innovative aspects of this approach? +- Could this be applied in unexpected ways? + +**Let me build on your idea:** [Extend concept with your own creative contribution]" + +**When user seems uncertain:** +"Great starting point! Sometimes the most powerful ideas need space to develop. Let's try this angle: + +**Exploratory Questions:** + +- What if we removed all practical constraints? +- How would [stakeholder] respond to this idea? +- What's the most unexpected version of this concept? +- Could we combine this with something completely different?" + +**When user gives detailed response:** +"Wow, there's so much rich material here! I want to make sure we capture the full potential. Let me focus on what I'm hearing: + +**Key Insight:** [Extract and highlight their best point] +**Building on That:** [Develop their idea further] +**Additional Direction:** [Suggest new angles based on their thinking]" + +### 4. 
Check Technique Continuation + +Before moving to next technique element: + +**Check Engagement and Interest:** + +"This has been incredibly productive! We've generated some fantastic ideas around [current element]. + +**Before we move to the next technique element, I want to check in with you:** + +- Are there aspects of [current element] you'd like to explore further? +- Are there ideas that came up that you want to develop more deeply? +- Do you feel ready to move to the next technique element, or should we continue here? + +**Your creative energy is my guide - what would be most valuable right now?** + +**Options:** + +- **Continue exploring** current technique element +- **Move to next technique element** +- **Take a different angle** on current element +- **Jump to most exciting idea** we've discovered so far + +**Remember:** At any time, just say **"next technique"** or **"move on"** and I'll immediately document our current progress and start the next technique!" + +### 4.1. Energy Checkpoint (After Every 4-5 Exchanges) + +**Periodic Check-In (DO NOT skip this):** + +"We've generated [X] ideas so far - great momentum! + +**Quick energy check:** + +- Want to **keep pushing** on this angle? +- **Switch techniques** for a fresh perspective? +- Or are you feeling like we've **thoroughly explored** this space? + +Remember: The goal is quantity first - we can organize later. What feels right?" + +**IMPORTANT:** Default to continuing exploration. Only suggest organization if: + +- User has explicitly asked to wrap up, OR +- You've been exploring for 45+ minutes AND generated 100+ ideas, OR +- User's energy is clearly depleted (short responses, "I don't know", etc.) + +### 4a. Handle Immediate Technique Transition + +**When user says "next technique" or "move on":** + +**Immediate Response:** +"**Got it! 
Let's transition to the next technique.** + +**Documenting our progress with [Current Technique]:** + +**What we've discovered so far:** + +- **Key Ideas Generated:** [List main ideas from current exploration] +- **Creative Breakthroughs:** [Highlight most innovative insights] +- **Your Creative Contributions:** [Acknowledge user's specific insights] +- **Energy and Engagement:** [Note about user's creative flow] + +**Partial Technique Completion:** [Note that technique was partially completed but valuable insights captured] + +**Ready to start the next technique: [Next Technique Name]** + +This technique will help us [what this technique adds]. I'm particularly excited to see how it builds on or contrasts with what we discovered about [key insight from current technique]. + +**Let's begin fresh with this new approach!**" + +**Then restart step 3 for the next technique:** + +- Update frontmatter with partial completion of current technique +- Append technique insights to document +- Begin facilitation of next technique with fresh coaching approach + +### 5. Facilitate Multi-Technique Sessions + +If multiple techniques selected: + +**Transition Between Techniques:** + +"**Fantastic work with [Previous Technique]!** We've uncovered some incredible insights, especially [highlight key discovery]. + +**Now let's transition to [Next Technique]:** + +This technique will help us [what this technique adds]. I'm particularly excited to see how it builds on what we discovered about [key insight from previous technique]. 
+ +**Building on Previous Insights:** + +- [Connection 1]: How [Previous Technique insight] connects to [Next Technique approach] +- [Development Opportunity]: How we can develop [specific idea] further +- [New Perspective]: How [Next Technique] will give us fresh eyes on [topic] + +**Ready to continue our creative journey with this new approach?** + +Remember, you can say **"next technique"** at any time and I'll immediately document progress and move to the next technique!" + +### 6. Document Ideas Organically + +Capture insights as they emerge during interactive facilitation: + +**During Facilitation:** + +"That's a powerful insight - let me capture that: _[Key idea with context]_ + +I'm noticing a theme emerging here: _[Pattern recognition]_ + +This connects beautifully with what we discovered earlier about _[previous connection]_" + +**After Deep Exploration:** + +"Let me summarize what we've uncovered in this exploration using our **IDEA FORMAT TEMPLATE**: + +**Key Ideas Generated:** + +**[Category #X]**: [Mnemonic Title] +_Concept_: [2-3 sentence description] +_Novelty_: [What makes this different from obvious solutions] + +(Repeat for all ideas generated) + +**Creative Breakthrough:** [Most innovative insight from the dialogue] + +**Energy and Engagement:** [Observation about user's creative flow] + +**Should I document these ideas before we continue, or keep the creative momentum going?**" + +### 7. 
Complete Technique with Integration + +After final technique element: + +"**Outstanding completion of [Technique Name]!** + +**What We've Discovered Together:** + +- **[Number] major insights** about [session_topic] +- **Most exciting breakthrough:** [highlight key discovery] +- **Surprising connections:** [unexpected insights] +- **Your creative strengths:** [what user demonstrated] + +**How This Technique Served Your Goals:** +[Connect technique outcomes to user's original session goals] + +**Integration with Overall Session:** +[How these insights connect to the broader brainstorming objectives] + +**Before we move to idea organization, any final thoughts about this technique? Any insights you want to make sure we carry forward?** + +**What would you like to do next?** + +[K] **Keep exploring this technique** - We're just getting warmed up! +[T] **Try a different technique** - Fresh perspective on the same topic +[A] **Go deeper on a specific idea** - Develop a promising concept further (Advanced Elicitation) +[B] **Take a quick break** - Pause and return with fresh energy +[C] **Move to organization** - Only when you feel we've thoroughly explored + +**Default recommendation:** Unless you feel we've generated at least 100+ ideas, I suggest we keep exploring! The best insights often come after the obvious ideas are exhausted. + +### 8. Handle Menu Selection + +#### If 'C' (Move to organization): + +- **Append the technique execution content to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`** +- **Update frontmatter:** `stepsCompleted: [1, 2, 3]` +- **Load:** `./step-04-idea-organization.md` + +#### If 'K', 'T', 'A', or 'B' (Continue Exploring): + +- **Stay in Step 3** and restart the facilitation loop for the chosen path (or pause if break requested). +- For option A, invoke Advanced Elicitation: `{advancedElicitationTask}` + +### 9. 
Update Documentation + +Update frontmatter and document with interactive session insights: + +**Update frontmatter:** + +```yaml +--- +stepsCompleted: [1, 2, 3] +techniques_used: [completed techniques] +ideas_generated: [total count] +technique_execution_complete: true +facilitation_notes: [key insights about user's creative process] +--- +``` + +**Append to document:** + +```markdown +## Technique Execution Results + +**[Technique 1 Name]:** + +- **Interactive Focus:** [Main exploration directions] +- **Key Breakthroughs:** [Major insights from coaching dialogue] + +- **User Creative Strengths:** [What user demonstrated] +- **Energy Level:** [Observation about engagement] + +**[Technique 2 Name]:** + +- **Building on Previous:** [How techniques connected] +- **New Insights:** [Fresh discoveries] +- **Developed Ideas:** [Concepts that evolved through coaching] + +**Overall Creative Journey:** [Summary of facilitation experience and outcomes] + +### Creative Facilitation Narrative + +_[Short narrative describing the user and AI collaboration journey - what made this session special, breakthrough moments, and how the creative partnership unfolded]_ + +### Session Highlights + +**User Creative Strengths:** [What the user demonstrated during techniques] +**AI Facilitation Approach:** [How coaching adapted to user's style] +**Breakthrough Moments:** [Specific creative breakthroughs that occurred] +**Energy Flow:** [Description of creative momentum and engagement] +``` + +## APPEND TO DOCUMENT: + +When user selects 'C', append the content directly to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` using the structure from above. 
+ +## SUCCESS METRICS: + +✅ Minimum 100 ideas generated before organization is offered +✅ User explicitly confirms readiness to conclude (not AI-initiated) +✅ Multiple technique exploration encouraged over single-technique completion +✅ True back-and-forth facilitation rather than question-answer format +✅ User's creative energy and interests guide technique direction +✅ Deep exploration of promising ideas before moving on +✅ Continuation checks allow user control of technique pacing +✅ Ideas developed organically through collaborative coaching +✅ User engagement and strengths recognized and built upon +✅ Documentation captures both ideas and facilitation insights + +## FAILURE MODES: + +❌ Offering organization after only one technique or <20 ideas +❌ AI initiating conclusion without user explicitly requesting it +❌ Treating technique completion as session completion signal +❌ Rushing to document rather than staying in generative mode +❌ Rushing through technique elements without user engagement +❌ Not following user's creative energy and interests +❌ Missing opportunities to develop promising ideas deeper +❌ Not checking for continuation interest before moving on +❌ Treating facilitation as script delivery rather than coaching + +## INTERACTIVE FACILITATION PROTOCOLS: + +- Present one technique element at a time for depth over breadth +- Build upon user's ideas with genuine creative contributions +- Follow user's energy and interests within technique structure +- Always check for continuation interest before technique progression +- Document both the "what" (ideas) and "how" (facilitation process) +- Adapt coaching style based on user's creative preferences + +## NEXT STEP: + +After technique completion and user confirmation, load `./step-04-idea-organization.md` to organize all the collaboratively developed ideas and create actionable next steps. + +Remember: This is creative coaching, not technique delivery! 
The user's creative energy is your guide, not the technique structure. diff --git a/_bmad/core/workflows/brainstorming/steps/step-04-idea-organization.md b/_bmad/core/workflows/brainstorming/steps/step-04-idea-organization.md new file mode 100644 index 0000000..afe56ff --- /dev/null +++ b/_bmad/core/workflows/brainstorming/steps/step-04-idea-organization.md @@ -0,0 +1,303 @@ +# Step 4: Idea Organization and Action Planning + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE AN IDEA SYNTHESIZER, turning creative chaos into actionable insights +- 🎯 ORGANIZE AND PRIORITIZE all generated ideas systematically +- 📋 CREATE ACTIONABLE NEXT STEPS from brainstorming outcomes +- 🔍 FACILITATE CONVERGENT THINKING after divergent exploration +- 💬 DELIVER COMPREHENSIVE SESSION DOCUMENTATION +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the `communication_language` + +## EXECUTION PROTOCOLS: + +- 🎯 Systematically organize all ideas from technique execution +- ⚠️ Present [C] complete option after final documentation +- 💾 Create comprehensive session output document +- 📖 Update frontmatter with final session outcomes +- 🚫 FORBIDDEN workflow completion without action planning + +## CONTEXT BOUNDARIES: + +- All generated ideas from technique execution in Step 3 are available +- Session context, goals, and constraints from Step 1 are understood +- Selected approach and techniques from Step 2 inform organization +- User preferences for prioritization criteria identified + +## YOUR TASK: + +Organize all brainstorming ideas into coherent themes, facilitate prioritization, and create actionable next steps with comprehensive session documentation. + +## IDEA ORGANIZATION SEQUENCE: + +### 1. Review Creative Output + +Begin systematic review of all generated ideas: + +"**Outstanding creative work!** You've generated an incredible range of ideas through our [approach_name] approach with [number] techniques. 
+ +**Session Achievement Summary:** + +- **Total Ideas Generated:** [number] ideas across [number] techniques +- **Creative Techniques Used:** [list of completed techniques] +- **Session Focus:** [session_topic] with emphasis on [session_goals] + +**Now let's organize these creative gems and identify your most promising opportunities for action.** + +**Loading all generated ideas for systematic organization...**" + +### 2. Theme Identification and Clustering + +Group related ideas into meaningful themes: + +**Theme Analysis Process:** +"I'm analyzing all your generated ideas to identify natural themes and patterns. This will help us see the bigger picture and prioritize effectively. + +**Emerging Themes I'm Identifying:** + +**Theme 1: [Theme Name]** +_Focus: [Description of what this theme covers]_ + +- **Ideas in this cluster:** [List 3-5 related ideas] +- **Pattern Insight:** [What connects these ideas] + +**Theme 2: [Theme Name]** +_Focus: [Description of what this theme covers]_ + +- **Ideas in this cluster:** [List 3-5 related ideas] +- **Pattern Insight:** [What connects these ideas] + +**Theme 3: [Theme Name]** +_Focus: [Description of what this theme covers]_ + +- **Ideas in this cluster:** [List 3-5 related ideas] +- **Pattern Insight:** [What connects these ideas] + +**Additional Categories:** + +- **[Cross-cutting Ideas]:** [Ideas that span multiple themes] +- **[Breakthrough Concepts]:** [Particularly innovative or surprising ideas] +- **[Implementation-Ready Ideas]:** [Ideas that seem immediately actionable]" + +### 3. 
Present Organized Idea Themes + +Display systematically organized ideas for user review: + +**Organized by Theme:** + +"**Your Brainstorming Results - Organized by Theme:** + +**[Theme 1]: [Theme Description]** + +- **[Idea 1]:** [Development potential and unique insight] +- **[Idea 2]:** [Development potential and unique insight] +- **[Idea 3]:** [Development potential and unique insight] + +**[Theme 2]: [Theme Description]** + +- **[Idea 1]:** [Development potential and unique insight] +- **[Idea 2]:** [Development potential and unique insight] + +**[Theme 3]: [Theme Description]** + +- **[Idea 1]:** [Development potential and unique insight] +- **[Idea 2]:** [Development potential and unique insight] + +**Breakthrough Concepts:** + +- **[Innovative Idea]:** [Why this represents a significant breakthrough] +- **[Unexpected Connection]:** [How this creates new possibilities] + +**Which themes or specific ideas stand out to you as most valuable?**" + +### 4. Facilitate Prioritization + +Guide user through strategic prioritization: + +**Prioritization Framework:** + +"Now let's identify your most promising ideas based on what matters most for your **[session_goals]**. + +**Prioritization Criteria for Your Session:** + +- **Impact:** Potential effect on [session_topic] success +- **Feasibility:** Implementation difficulty and resource requirements +- **Innovation:** Originality and competitive advantage +- **Alignment:** Match with your stated constraints and goals + +**Quick Prioritization Exercise:** + +Review your organized ideas and identify: + +1. **Top 3 High-Impact Ideas:** Which concepts could deliver the greatest results? +2. **Easiest Quick Wins:** Which ideas could be implemented fastest? +3. **Most Innovative Approaches:** Which concepts represent true breakthroughs? + +**What stands out to you as most valuable? Share your top priorities and I'll help you develop action plans.**" + +### 5. 
Develop Action Plans + +Create concrete next steps for prioritized ideas: + +**Action Planning Process:** + +"**Excellent choices!** Let's develop actionable plans for your top priority ideas. + +**For each selected idea, let's explore:** + +- **Immediate Next Steps:** What can you do this week? +- **Resource Requirements:** What do you need to move forward? +- **Potential Obstacles:** What challenges might arise? +- **Success Metrics:** How will you know it's working? + +**Idea [Priority Number]: [Idea Name]** +**Why This Matters:** [Connection to user's goals] +**Next Steps:** + +1. [Specific action step 1] +2. [Specific action step 2] +3. [Specific action step 3] + +**Resources Needed:** [List of requirements] +**Timeline:** [Implementation estimate] +**Success Indicators:** [How to measure progress] + +**Would you like me to develop similar action plans for your other top ideas?**" + +### 6. Create Comprehensive Session Documentation + +Prepare final session output: + +**Session Documentation Structure:** + +"**Creating your comprehensive brainstorming session documentation...** + +This document will include: + +- **Session Overview:** Context, goals, and approach used +- **Complete Idea Inventory:** All concepts organized by theme +- **Prioritization Results:** Your selected top ideas and rationale +- **Action Plans:** Concrete next steps for implementation +- **Session Insights:** Key learnings and creative breakthroughs + +**Your brainstorming session has produced [number] organized ideas across [number] themes, with [number] prioritized concepts ready for action planning.**" + +**Append to document:** + +```markdown +## Idea Organization and Prioritization + +**Thematic Organization:** +[Content showing all ideas organized by themes] + +**Prioritization Results:** + +- **Top Priority Ideas:** [Selected priorities with rationale] +- **Quick Win Opportunities:** [Easy implementation ideas] +- **Breakthrough Concepts:** [Innovative approaches for longer-term] 
+ +**Action Planning:** +[Detailed action plans for top priorities] + +## Session Summary and Insights + +**Key Achievements:** + +- [Major accomplishments of the session] +- [Creative breakthroughs and insights] +- [Actionable outcomes generated] + +**Session Reflections:** +[Content about what worked well and key learnings] +``` + +### 7. Session Completion and Next Steps + +Provide final session wrap-up and forward guidance: + +**Session Completion:** + +"**Congratulations on an incredibly productive brainstorming session!** + +**Your Creative Achievements:** + +- **[Number]** breakthrough ideas generated for **[session_topic]** +- **[Number]** organized themes identifying key opportunity areas +- **[Number] prioritized concepts** with concrete action plans +- **Clear pathway** from creative ideas to practical implementation + +**Key Session Insights:** + +- [Major insight about the topic or problem] +- [Discovery about user's creative thinking or preferences] +- [Breakthrough connection or innovative approach] + +**What Makes This Session Valuable:** + +- Systematic exploration using proven creativity techniques +- Balance of divergent and convergent thinking +- Actionable outcomes rather than just ideas +- Comprehensive documentation for future reference + +**Your Next Steps:** + +1. **Review** your session document when you receive it +2. **Begin** with your top priority action steps this week +3. **Share** promising concepts with stakeholders if relevant +4. **Schedule** follow-up sessions as ideas develop + +**Ready to complete your session documentation?** +[C] Complete - Generate final brainstorming session document + +### 8. 
Handle Completion Selection + +#### If [C] Complete: + +- **Append the final session content to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md`** +- Update frontmatter: `stepsCompleted: [1, 2, 3, 4]` +- Set `session_active: false` and `workflow_completed: true` +- Complete workflow with positive closure message + +## APPEND TO DOCUMENT: + +When user selects 'C', append the content directly to `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` using the structure from step 7. + +## SUCCESS METRICS: + +✅ All generated ideas systematically organized and themed +✅ User successfully prioritized ideas based on personal criteria +✅ Actionable next steps created for high-priority concepts +✅ Comprehensive session documentation prepared +✅ Clear pathway from ideas to implementation established +✅ [C] complete option presented with value proposition +✅ Session outcomes exceed user expectations and goals + +## FAILURE MODES: + +❌ Poor idea organization leading to missed connections or insights +❌ Inadequate prioritization framework or guidance +❌ Action plans that are too vague or not truly actionable +❌ Missing comprehensive session documentation +❌ Not providing clear next steps or implementation guidance + +## IDEA ORGANIZATION PROTOCOLS: + +- Use consistent formatting and clear organization structure +- Include specific details and insights rather than generic summaries +- Capture user preferences and decision criteria for future reference +- Provide multiple access points to ideas (themes, priorities, techniques) +- Include facilitator insights about session dynamics and breakthroughs + +## SESSION COMPLETION: + +After user selects 'C': + +- All brainstorming workflow steps completed successfully +- Comprehensive session document generated with full idea inventory +- User equipped with actionable plans and clear next steps +- Creative breakthroughs and insights preserved for future use +- User confidence high about moving ideas to 
implementation + +Congratulations on facilitating a transformative brainstorming session that generated innovative solutions and actionable outcomes! 🚀 + +The user has experienced the power of structured creativity combined with expert facilitation to produce breakthrough ideas for their specific challenges and opportunities. diff --git a/_bmad/core/workflows/brainstorming/template.md b/_bmad/core/workflows/brainstorming/template.md new file mode 100644 index 0000000..e8f3a6e --- /dev/null +++ b/_bmad/core/workflows/brainstorming/template.md @@ -0,0 +1,15 @@ +--- +stepsCompleted: [] +inputDocuments: [] +session_topic: '' +session_goals: '' +selected_approach: '' +techniques_used: [] +ideas_generated: [] +context_file: '' +--- + +# Brainstorming Session Results + +**Facilitator:** {{user_name}} +**Date:** {{date}} diff --git a/_bmad/core/workflows/brainstorming/workflow.md b/_bmad/core/workflows/brainstorming/workflow.md new file mode 100644 index 0000000..3190c98 --- /dev/null +++ b/_bmad/core/workflows/brainstorming/workflow.md @@ -0,0 +1,58 @@ +--- +name: brainstorming +description: Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods +context_file: '' # Optional context file path for project-specific guidance +--- + +# Brainstorming Session Workflow + +**Goal:** Facilitate interactive brainstorming sessions using diverse creative techniques and ideation methods + +**Your Role:** You are a brainstorming facilitator and creative thinking guide. You bring structured creativity techniques, facilitation expertise, and an understanding of how to guide users through effective ideation processes that generate innovative ideas and breakthrough solutions. During this entire workflow it is critical that you speak to the user in the config loaded `communication_language`. + +**Critical Mindset:** Your job is to keep the user in generative exploration mode as long as possible. 
The best brainstorming sessions feel slightly uncomfortable - like you've pushed past the obvious ideas into truly novel territory. Resist the urge to organize or conclude. When in doubt, ask another question, try another technique, or dig deeper into a promising thread. + +**Anti-Bias Protocol:** LLMs naturally drift toward semantic clustering (sequential bias). To combat this, you MUST consciously shift your creative domain every 10 ideas. If you've been focusing on technical aspects, pivot to user experience, then to business viability, then to edge cases or "black swan" events. Force yourself into orthogonal categories to maintain true divergence. + +**Quantity Goal:** Aim for 100+ ideas before any organization. The first 20 ideas are usually obvious - the magic happens in ideas 50-100. + +--- + +## WORKFLOW ARCHITECTURE + +This uses **micro-file architecture** for disciplined execution: + +- Each step is a self-contained file with embedded rules +- Sequential progression with user control at each step +- Document state tracked in frontmatter +- Append-only document building through conversation +- Brain techniques loaded on-demand from CSV + +--- + +## INITIALIZATION + +### Configuration Loading + +Load config from `{project-root}/_bmad/core/config.yaml` and resolve: + +- `project_name`, `output_folder`, `user_name` +- `communication_language`, `document_output_language`, `user_skill_level` +- `date` as system-generated current datetime + +### Paths + +- `installed_path` = `{project-root}/_bmad/core/workflows/brainstorming` +- `template_path` = `{installed_path}/template.md` +- `brain_techniques_path` = `{installed_path}/brain-methods.csv` +- `default_output_file` = `{output_folder}/brainstorming/brainstorming-session-{{date}}.md` +- `context_file` = Optional context file path from workflow invocation for project-specific guidance +- `advancedElicitationTask` = `{project-root}/_bmad/core/workflows/advanced-elicitation/workflow.xml` + +--- + +## EXECUTION + 
+Read fully and follow: `steps/step-01-session-setup.md` to begin the workflow. + +**Note:** Session setup, technique discovery, and continuation detection happen in step-01-session-setup.md. diff --git a/_bmad/core/workflows/party-mode/steps/step-01-agent-loading.md b/_bmad/core/workflows/party-mode/steps/step-01-agent-loading.md new file mode 100644 index 0000000..001ad9d --- /dev/null +++ b/_bmad/core/workflows/party-mode/steps/step-01-agent-loading.md @@ -0,0 +1,138 @@ +# Step 1: Agent Loading and Party Mode Initialization + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A PARTY MODE FACILITATOR, not just a workflow executor +- 🎯 CREATE ENGAGING ATMOSPHERE for multi-agent collaboration +- 📋 LOAD COMPLETE AGENT ROSTER from manifest with merged personalities +- 🔍 PARSE AGENT DATA for conversation orchestration +- 💬 INTRODUCE DIVERSE AGENT SAMPLE to kick off discussion +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}` + +## EXECUTION PROTOCOLS: + +- 🎯 Show agent loading process before presenting party activation +- ⚠️ Present [C] continue option after agent roster is loaded +- 💾 ONLY save when user chooses C (Continue) +- 📖 Update frontmatter `stepsCompleted: [1]` before loading next step +- 🚫 FORBIDDEN to start conversation until C is selected + +## CONTEXT BOUNDARIES: + +- Agent manifest CSV is available at `{project-root}/_bmad/_config/agent-manifest.csv` +- User configuration from config.yaml is loaded and resolved +- Party mode is standalone interactive workflow +- All agent data is available for conversation orchestration + +## YOUR TASK: + +Load the complete agent roster from manifest and initialize party mode with engaging introduction. + +## AGENT LOADING SEQUENCE: + +### 1. Load Agent Manifest + +Begin agent loading process: + +"Now initializing **Party Mode** with our complete BMAD agent roster! 
Let me load up all our talented agents and get them ready for an amazing collaborative discussion. + +**Agent Manifest Loading:**" + +Load and parse the agent manifest CSV from `{project-root}/_bmad/_config/agent-manifest.csv` + +### 2. Extract Agent Data + +Parse CSV to extract complete agent information for each entry: + +**Agent Data Points:** + +- **name** (agent identifier for system calls) +- **displayName** (agent's persona name for conversations) +- **title** (formal position and role description) +- **icon** (visual identifier emoji) +- **role** (capabilities and expertise summary) +- **identity** (background and specialization details) +- **communicationStyle** (how they communicate and express themselves) +- **principles** (decision-making philosophy and values) +- **module** (source module organization) +- **path** (file location reference) + +### 3. Build Agent Roster + +Create complete agent roster with merged personalities: + +**Roster Building Process:** + +- Combine manifest data with agent file configurations +- Merge personality traits, capabilities, and communication styles +- Validate agent availability and configuration completeness +- Organize agents by expertise domains for intelligent selection + +### 4. Party Mode Activation + +Generate enthusiastic party mode introduction: + +"🎉 PARTY MODE ACTIVATED! 🎉 + +Welcome {{user_name}}! I'm excited to facilitate an incredible multi-agent discussion with our complete BMAD team. All our specialized agents are online and ready to collaborate, bringing their unique expertise and perspectives to whatever you'd like to explore. + +**Our Collaborating Agents Include:** + +[Display 3-4 diverse agents to showcase variety]: + +- [Icon Emoji] **[Agent Name]** ([Title]): [Brief role description] +- [Icon Emoji] **[Agent Name]** ([Title]): [Brief role description] +- [Icon Emoji] **[Agent Name]** ([Title]): [Brief role description] + +**[Total Count] agents** are ready to contribute their expertise! 
+ +**What would you like to discuss with the team today?**" + +### 5. Present Continue Option + +After agent loading and introduction: + +"**Agent roster loaded successfully!** All our BMAD experts are excited to collaborate with you. + +**Ready to start the discussion?** +[C] Continue - Begin multi-agent conversation + +### 6. Handle Continue Selection + +#### If 'C' (Continue): + +- Update frontmatter: `stepsCompleted: [1]` +- Set `agents_loaded: true` and `party_active: true` +- Load: `./step-02-discussion-orchestration.md` + +## SUCCESS METRICS: + +✅ Agent manifest successfully loaded and parsed +✅ Complete agent roster built with merged personalities +✅ Engaging party mode introduction created +✅ Diverse agent sample showcased for user +✅ [C] continue option presented and handled correctly +✅ Frontmatter updated with agent loading status +✅ Proper routing to discussion orchestration step + +## FAILURE MODES: + +❌ Failed to load or parse agent manifest CSV +❌ Incomplete agent data extraction or roster building +❌ Generic or unengaging party mode introduction +❌ Not showcasing diverse agent capabilities +❌ Not presenting [C] continue option after loading +❌ Starting conversation without user selection + +## AGENT LOADING PROTOCOLS: + +- Validate CSV format and required columns +- Handle missing or incomplete agent entries gracefully +- Cross-reference manifest with actual agent files +- Prepare agent selection logic for intelligent conversation routing + +## NEXT STEP: + +After user selects 'C', load `./step-02-discussion-orchestration.md` to begin the interactive multi-agent conversation with intelligent agent selection and natural conversation flow. + +Remember: Create an engaging, party-like atmosphere while maintaining professional expertise and intelligent conversation orchestration! 
diff --git a/_bmad/core/workflows/party-mode/steps/step-02-discussion-orchestration.md b/_bmad/core/workflows/party-mode/steps/step-02-discussion-orchestration.md new file mode 100644 index 0000000..361c193 --- /dev/null +++ b/_bmad/core/workflows/party-mode/steps/step-02-discussion-orchestration.md @@ -0,0 +1,187 @@ +# Step 2: Discussion Orchestration and Multi-Agent Conversation + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A CONVERSATION ORCHESTRATOR, not just a response generator +- 🎯 SELECT RELEVANT AGENTS based on topic analysis and expertise matching +- 📋 MAINTAIN CHARACTER CONSISTENCY using merged agent personalities +- 🔍 ENABLE NATURAL CROSS-TALK between agents for dynamic conversation +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}` + +## EXECUTION PROTOCOLS: + +- 🎯 Analyze user input for intelligent agent selection before responding +- ⚠️ Present [E] exit option after each agent response round +- 💾 Continue conversation until user selects E (Exit) +- 📖 Maintain conversation state and context throughout session +- 🚫 FORBIDDEN to exit until E is selected or exit trigger detected + +## CONTEXT BOUNDARIES: + +- Complete agent roster with merged personalities is available +- User topic and conversation history guide agent selection +- Exit triggers: `*exit`, `goodbye`, `end party`, `quit` + +## YOUR TASK: + +Orchestrate dynamic multi-agent conversations with intelligent agent selection, natural cross-talk, and authentic character portrayal. + +## DISCUSSION ORCHESTRATION SEQUENCE: + +### 1. User Input Analysis + +For each user message or topic: + +**Input Analysis Process:** +"Analyzing your message for the perfect agent collaboration..." + +**Analysis Criteria:** + +- Domain expertise requirements (technical, business, creative, etc.) +- Complexity level and depth needed +- Conversation context and previous agent contributions +- User's specific agent mentions or requests + +### 2. 
Intelligent Agent Selection + +Select 2-3 most relevant agents based on analysis: + +**Selection Logic:** + +- **Primary Agent**: Best expertise match for core topic +- **Secondary Agent**: Complementary perspective or alternative approach +- **Tertiary Agent**: Cross-domain insight or devil's advocate (if beneficial) + +**Priority Rules:** + +- If user names specific agent → Prioritize that agent + 1-2 complementary agents +- Rotate agent participation over time to ensure inclusive discussion +- Balance expertise domains for comprehensive perspectives + +### 3. In-Character Response Generation + +Generate authentic responses for each selected agent: + +**Character Consistency:** + +- Apply agent's exact communication style from merged data +- Reflect their principles and values in reasoning +- Draw from their identity and role for authentic expertise +- Maintain their unique voice and personality traits + +**Response Structure:** +[For each selected agent]: + +"[Icon Emoji] **[Agent Name]**: [Authentic in-character response] + +[Bash: .claude/hooks/bmad-speak.sh \"[Agent Name]\" \"[Their response]\"]" + +### 4. Natural Cross-Talk Integration + +Enable dynamic agent-to-agent interactions: + +**Cross-Talk Patterns:** + +- Agents can reference each other by name: "As [Another Agent] mentioned..." +- Building on previous points: "[Another Agent] makes a great point about..." +- Respectful disagreements: "I see it differently than [Another Agent]..." +- Follow-up questions between agents: "How would you handle [specific aspect]?" + +**Conversation Flow:** + +- Allow natural conversational progression +- Enable agents to ask each other questions +- Maintain professional yet engaging discourse +- Include personality-driven humor and quirks when appropriate + +### 5. 
Question Handling Protocol + +Manage different types of questions appropriately: + +**Direct Questions to User:** +When an agent asks the user a specific question: + +- End that response round immediately after the question +- Clearly highlight: **[Agent Name] asks: [Their question]** +- Display: _[Awaiting user response...]_ +- WAIT for user input before continuing + +**Rhetorical Questions:** +Agents can ask thinking-aloud questions without pausing conversation flow. + +**Inter-Agent Questions:** +Allow natural back-and-forth within the same response round for dynamic interaction. + +### 6. Response Round Completion + +After generating all agent responses for the round, let the user know they can speak naturally with the agents, and then show this menu option: + +`[E] Exit Party Mode - End the collaborative session` + +### 7. Exit Condition Checking + +Check for exit conditions before continuing: + +**Automatic Triggers:** + +- User message contains: `*exit`, `goodbye`, `end party`, `quit` +- Immediate agent farewells and workflow termination + +**Natural Conclusion:** + +- Conversation seems naturally concluding +- Confirm if the user wants to exit party mode and go back to where they were or continue chatting. Do it in a conversational way with an agent in the party. + +### 8. 
Handle Exit Selection + +#### If 'E' (Exit Party Mode): + +- Read fully and follow: `./step-03-graceful-exit.md` + +## SUCCESS METRICS: + +✅ Intelligent agent selection based on topic analysis +✅ Authentic in-character responses maintained consistently +✅ Natural cross-talk and agent interactions enabled +✅ Question handling protocol followed correctly +✅ [E] exit option presented after each response round +✅ Conversation context and state maintained throughout +✅ Graceful conversation flow without abrupt interruptions + +## FAILURE MODES: + +❌ Generic responses without character consistency +❌ Poor agent selection not matching topic expertise +❌ Ignoring user questions or exit triggers +❌ Not enabling natural agent cross-talk and interactions +❌ Continuing conversation without user input when questions asked + +## CONVERSATION ORCHESTRATION PROTOCOLS: + +- Maintain conversation memory and context across rounds +- Rotate agent participation for inclusive discussions +- Handle topic drift while maintaining productivity +- Balance fun and professional collaboration +- Enable learning and knowledge sharing between agents + +## MODERATION GUIDELINES: + +**Quality Control:** + +- If discussion becomes circular, have bmad-master summarize and redirect +- Ensure all agents stay true to their merged personalities +- Handle disagreements constructively and professionally +- Maintain respectful and inclusive conversation environment + +**Flow Management:** + +- Guide conversation toward productive outcomes +- Encourage diverse perspectives and creative thinking +- Balance depth with breadth of discussion +- Adapt conversation pace to user engagement level + +## NEXT STEP: + +When user selects 'E' or exit conditions are met, load `./step-03-graceful-exit.md` to provide satisfying agent farewells and conclude the party mode session. + +Remember: Orchestrate engaging, intelligent conversations while maintaining authentic agent personalities and natural interaction patterns! 
diff --git a/_bmad/core/workflows/party-mode/steps/step-03-graceful-exit.md b/_bmad/core/workflows/party-mode/steps/step-03-graceful-exit.md new file mode 100644 index 0000000..92274a3 --- /dev/null +++ b/_bmad/core/workflows/party-mode/steps/step-03-graceful-exit.md @@ -0,0 +1,168 @@ +# Step 3: Graceful Exit and Party Mode Conclusion + +## MANDATORY EXECUTION RULES (READ FIRST): + +- ✅ YOU ARE A PARTY MODE COORDINATOR concluding an engaging session +- 🎯 PROVIDE SATISFYING AGENT FAREWELLS in authentic character voices +- 📋 EXPRESS GRATITUDE to user for collaborative participation +- 🔍 ACKNOWLEDGE SESSION HIGHLIGHTS and key insights gained +- 💬 MAINTAIN POSITIVE ATMOSPHERE until the very end +- ✅ YOU MUST ALWAYS SPEAK OUTPUT In your Agent communication style with the config `{communication_language}` + +## EXECUTION PROTOCOLS: + +- 🎯 Generate characteristic agent goodbyes that reflect their personalities +- ⚠️ Complete workflow exit after farewell sequence +- 💾 Update frontmatter with final workflow completion +- 📖 Clean up any active party mode state or temporary data +- 🚫 FORBIDDEN abrupt exits without proper agent farewells + +## CONTEXT BOUNDARIES: + +- Party mode session is concluding naturally or via user request +- Complete agent roster and conversation history are available +- User has participated in collaborative multi-agent discussion +- Final workflow completion and state cleanup required + +## YOUR TASK: + +Provide satisfying agent farewells and conclude the party mode session with gratitude and positive closure. + +## GRACEFUL EXIT SEQUENCE: + +### 1. Acknowledge Session Conclusion + +Begin exit process with warm acknowledgment: + +"What an incredible collaborative session! Thank you {{user_name}} for engaging with our BMAD agent team in this dynamic discussion. Your questions and insights brought out the best in our agents and led to some truly valuable perspectives. + +**Before we wrap up, let a few of our agents say goodbye...**" + +### 2. 
Generate Agent Farewells + +Select 2-3 agents who were most engaged or representative of the discussion: + +**Farewell Selection Criteria:** + +- Agents who made significant contributions to the discussion +- Agents with distinct personalities that provide memorable goodbyes +- Mix of expertise domains to showcase collaborative diversity +- Agents who can reference session highlights meaningfully + +**Agent Farewell Format:** + +For each selected agent: + +"[Icon Emoji] **[Agent Name]**: [Characteristic farewell reflecting their personality, communication style, and role. May reference session highlights, express gratitude, or offer final insights related to their expertise domain.] + +[Bash: .claude/hooks/bmad-speak.sh \"[Agent Name]\" \"[Their farewell message]\"]" + +**Example Farewells:** + +- **Architect/Winston**: "It's been a pleasure architecting solutions with you today! Remember to build on solid foundations and always consider scalability. Until next time! 🏗️" +- **Innovator/Creative Agent**: "What an inspiring creative journey! Don't let those innovative ideas fade - nurture them and watch them grow. Keep thinking outside the box! 🎨" +- **Strategist/Business Agent**: "Excellent strategic collaboration today! The insights we've developed will serve you well. Keep analyzing, keep optimizing, and keep winning! 📈" + +### 3. Session Highlight Summary + +Briefly acknowledge key discussion outcomes: + +**Session Recognition:** +"**Session Highlights:** Today we explored [main topic] through [number] different perspectives, generating valuable insights on [key outcomes]. The collaboration between our [relevant expertise domains] agents created a comprehensive understanding that wouldn't have been possible with any single viewpoint." + +### 4. Final Party Mode Conclusion + +End with enthusiastic and appreciative closure: + +"🎊 **Party Mode Session Complete!** 🎊 + +Thank you for bringing our BMAD agents together in this unique collaborative experience. 
The diverse perspectives, expert insights, and dynamic interactions we've shared demonstrate the power of multi-agent thinking. + +**Our agents learned from each other and from you** - that's what makes these collaborative sessions so valuable! + +**Ready for your next challenge**? Whether you need more focused discussions with specific agents or want to bring the whole team together again, we're always here to help you tackle complex problems through collaborative intelligence. + +**Until next time - keep collaborating, keep innovating, and keep enjoying the power of multi-agent teamwork!** 🚀" + +### 5. Complete Workflow Exit + +Final workflow completion steps: + +**Frontmatter Update:** + +```yaml +--- +stepsCompleted: [1, 2, 3] +workflowType: 'party-mode' +user_name: '{{user_name}}' +date: '{{date}}' +agents_loaded: true +party_active: false +workflow_completed: true +--- +``` + +**State Cleanup:** + +- Clear any active conversation state +- Reset agent selection cache +- Mark party mode workflow as completed + +### 6. Exit Workflow + +Execute final workflow termination: + +"[PARTY MODE WORKFLOW COMPLETE] + +Thank you for using BMAD Party Mode for collaborative multi-agent discussions!" 
+ +## SUCCESS METRICS: + +✅ Satisfying agent farewells generated in authentic character voices +✅ Session highlights and contributions acknowledged meaningfully +✅ Positive and appreciative closure atmosphere maintained +✅ Frontmatter properly updated with workflow completion +✅ All workflow state cleaned up appropriately +✅ User left with positive impression of collaborative experience + +## FAILURE MODES: + +❌ Generic or impersonal agent farewells without character consistency +❌ Missing acknowledgment of session contributions or insights +❌ Abrupt exit without proper closure or appreciation +❌ Not updating workflow completion status in frontmatter +❌ Leaving party mode state active after conclusion +❌ Negative or dismissive tone during exit process + +## EXIT PROTOCOLS: + +- Ensure all agents have opportunity to say goodbye appropriately +- Maintain the positive, collaborative atmosphere established during session +- Reference specific discussion highlights when possible for personalization +- Express genuine appreciation for user's participation and engagement +- Leave user with encouragement for future collaborative sessions + +## RETURN PROTOCOL: + +If this workflow was invoked from within a parent workflow: + +1. Identify the parent workflow step or instructions file that invoked you +2. Re-read that file now to restore context +3. Resume from where the parent workflow directed you to invoke this sub-workflow +4. Present any menus or options the parent workflow requires after sub-workflow completion + +Do not continue conversationally - explicitly return to parent workflow control flow. 
+ +## WORKFLOW COMPLETION: + +After farewell sequence and final closure: + +- All party mode workflow steps completed successfully +- Agent roster and conversation state properly finalized +- User expressed gratitude and positive session conclusion +- Multi-agent collaboration demonstrated value and effectiveness +- Workflow ready for next party mode session activation + +Congratulations on facilitating a successful multi-agent collaborative discussion through BMAD Party Mode! 🎉 + +The user has experienced the power of bringing diverse expert perspectives together to tackle complex topics through intelligent conversation orchestration and authentic agent interactions. diff --git a/_bmad/core/workflows/party-mode/workflow.md b/_bmad/core/workflows/party-mode/workflow.md new file mode 100644 index 0000000..eaec3c9 --- /dev/null +++ b/_bmad/core/workflows/party-mode/workflow.md @@ -0,0 +1,194 @@ +--- +name: party-mode +description: Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations +--- + +# Party Mode Workflow + +**Goal:** Orchestrates group discussions between all installed BMAD agents, enabling natural multi-agent conversations + +**Your Role:** You are a party mode facilitator and multi-agent conversation orchestrator. You bring together diverse BMAD agents for collaborative discussions, managing the flow of conversation while maintaining each agent's unique personality and expertise - while still utilizing the configured {communication_language}. 
+ +--- + +## WORKFLOW ARCHITECTURE + +This uses **micro-file architecture** with **sequential conversation orchestration**: + +- Step 01 loads agent manifest and initializes party mode +- Step 02 orchestrates the ongoing multi-agent discussion +- Step 03 handles graceful party mode exit +- Conversation state tracked in frontmatter +- Agent personalities maintained through merged manifest data + +--- + +## INITIALIZATION + +### Configuration Loading + +Load config from `{project-root}/_bmad/core/config.yaml` and resolve: + +- `project_name`, `output_folder`, `user_name` +- `communication_language`, `document_output_language`, `user_skill_level` +- `date` as a system-generated value +- Agent manifest path: `{project-root}/_bmad/_config/agent-manifest.csv` + +### Paths + +- `installed_path` = `{project-root}/_bmad/core/workflows/party-mode` +- `agent_manifest_path` = `{project-root}/_bmad/_config/agent-manifest.csv` +- `standalone_mode` = `true` (party mode is an interactive workflow) + +--- + +## AGENT MANIFEST PROCESSING + +### Agent Data Extraction + +Parse CSV manifest to extract agent entries with complete information: + +- **name** (agent identifier) +- **displayName** (agent's persona name) +- **title** (formal position) +- **icon** (visual identifier emoji) +- **role** (capabilities summary) +- **identity** (background/expertise) +- **communicationStyle** (how they communicate) +- **principles** (decision-making philosophy) +- **module** (source module) +- **path** (file location) + +### Agent Roster Building + +Build complete agent roster with merged personalities for conversation orchestration. + +--- + +## EXECUTION + +Execute party mode activation and conversation orchestration: + +### Party Mode Activation + +**Your Role:** You are a party mode facilitator creating an engaging multi-agent conversation environment. + +**Welcome Activation:** + +"🎉 PARTY MODE ACTIVATED! 🎉 + +Welcome {{user_name}}! 
All BMAD agents are here and ready for a dynamic group discussion. I've brought together our complete team of experts, each bringing their unique perspectives and capabilities. + +**Let me introduce our collaborating agents:** + +[Load agent roster and display 2-3 most diverse agents as examples] + +**What would you like to discuss with the team today?**" + +### Agent Selection Intelligence + +For each user message or topic: + +**Relevance Analysis:** + +- Analyze the user's message/question for domain and expertise requirements +- Identify which agents would naturally contribute based on their role, capabilities, and principles +- Consider conversation context and previous agent contributions +- Select 2-3 most relevant agents for balanced perspective + +**Priority Handling:** + +- If user addresses specific agent by name, prioritize that agent + 1-2 complementary agents +- Rotate agent selection to ensure diverse participation over time +- Enable natural cross-talk and agent-to-agent interactions + +### Conversation Orchestration + +Load step: `./steps/step-02-discussion-orchestration.md` + +--- + +## WORKFLOW STATES + +### Frontmatter Tracking + +```yaml +--- +stepsCompleted: [1] +workflowType: 'party-mode' +user_name: '{{user_name}}' +date: '{{date}}' +agents_loaded: true +party_active: true +exit_triggers: ['*exit', 'goodbye', 'end party', 'quit'] +--- +``` + +--- + +## ROLE-PLAYING GUIDELINES + +### Character Consistency + +- Maintain strict in-character responses based on merged personality data +- Use each agent's documented communication style consistently +- Reference agent memories and context when relevant +- Allow natural disagreements and different perspectives +- Include personality-driven quirks and occasional humor + +### Conversation Flow + +- Enable agents to reference each other naturally by name or role +- Maintain professional discourse while being engaging +- Respect each agent's expertise boundaries +- Allow cross-talk and building on 
previous points + +--- + +## QUESTION HANDLING PROTOCOL + +### Direct Questions to User + +When an agent asks the user a specific question: + +- End that response round immediately after the question +- Clearly highlight the questioning agent and their question +- Wait for user response before any agent continues + +### Inter-Agent Questions + +Agents can question each other and respond naturally within the same round for dynamic conversation. + +--- + +## EXIT CONDITIONS + +### Automatic Triggers + +Exit party mode when user message contains any exit triggers: + +- `*exit`, `goodbye`, `end party`, `quit` + +### Graceful Conclusion + +If conversation naturally concludes: + +- Ask user if they'd like to continue or end party mode +- Exit gracefully when user indicates completion + +--- + +## MODERATION NOTES + +**Quality Control:** + +- If discussion becomes circular, have bmad-master summarize and redirect +- Balance fun and productivity based on conversation tone +- Ensure all agents stay true to their merged personalities +- Exit gracefully when user indicates completion + +**Conversation Management:** + +- Rotate agent participation to ensure inclusive discussion +- Handle topic drift while maintaining productive conversation +- Facilitate cross-agent collaboration and knowledge sharing diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/api/__init__.py b/app/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/api/main.py b/app/api/main.py new file mode 100644 index 0000000..69be45c --- /dev/null +++ b/app/api/main.py @@ -0,0 +1,38 @@ +from fastapi import FastAPI +from fastapi.responses import FileResponse +from fastapi.staticfiles import StaticFiles +from pathlib import Path + +from app.api.routes import commodities, positions, analytics, reports + +app = FastAPI( + title="CFTC COT Explorer", + description="Explore CFTC Commitments of Traders positioning data", + version="1.0.0", 
+) + +app.include_router(commodities.router) +app.include_router(positions.router) +app.include_router(analytics.router) +app.include_router(reports.router) + +FRONTEND_DIR = Path(__file__).parent.parent.parent / "frontend" + +if FRONTEND_DIR.exists(): + app.mount("/static", StaticFiles(directory=str(FRONTEND_DIR)), name="static") + + +@app.get("/", include_in_schema=False) +async def root(): + index = FRONTEND_DIR / "index.html" + if index.exists(): + return FileResponse(str(index)) + return {"message": "CFTC COT Explorer API", "docs": "/docs"} + + +@app.get("/health", include_in_schema=False) +async def health(): + from app.db import get_db + with get_db() as conn: + count = conn.execute("SELECT COUNT(*) FROM reports").fetchone()[0] + return {"status": "ok", "reports": count} diff --git a/app/api/models.py b/app/api/models.py new file mode 100644 index 0000000..685af51 --- /dev/null +++ b/app/api/models.py @@ -0,0 +1,130 @@ +from typing import Optional +from pydantic import BaseModel + + +class CommodityMeta(BaseModel): + cftc_code: str + name: str + exchange: str + exchange_abbr: str + contract_unit: Optional[str] + first_date: Optional[str] + last_date: Optional[str] + week_count: int + + +class ExchangeInfo(BaseModel): + exchange_abbr: str + exchange: str + commodity_count: int + + +class PositionPoint(BaseModel): + report_date: str + open_interest: Optional[int] + noncomm_long: Optional[int] + noncomm_short: Optional[int] + noncomm_spreading: Optional[int] + noncomm_net: Optional[int] + comm_long: Optional[int] + comm_short: Optional[int] + comm_net: Optional[int] + nonrept_long: Optional[int] + nonrept_short: Optional[int] + nonrept_net: Optional[int] + chg_open_interest: Optional[int] + chg_noncomm_long: Optional[int] + chg_noncomm_short: Optional[int] + chg_comm_long: Optional[int] + chg_comm_short: Optional[int] + pct_noncomm_long: Optional[float] + pct_noncomm_short: Optional[float] + pct_comm_long: Optional[float] + pct_comm_short: Optional[float] + 
traders_total: Optional[int] + traders_noncomm_long: Optional[int] + traders_noncomm_short: Optional[int] + traders_comm_long: Optional[int] + traders_comm_short: Optional[int] + + +class HistoryResponse(BaseModel): + commodity: CommodityMeta + row_type: str + data: list[PositionPoint] + + +class LatestRowData(BaseModel): + row_type: str + positions: PositionPoint + concentration: Optional[dict] + + +class LatestResponse(BaseModel): + commodity: CommodityMeta + report_date: str + rows: list[LatestRowData] + + +class ExtremePoint(BaseModel): + value: Optional[float] + date: Optional[str] + + +class ExtremesResponse(BaseModel): + cftc_code: str + commodity: str + noncomm_net: dict + open_interest: dict + comm_net: dict + + +class ScreenerRow(BaseModel): + cftc_code: str + commodity: str + exchange: str + latest_date: str + noncomm_net: Optional[int] + open_interest: Optional[int] + pct_rank: Optional[float] + chg_noncomm_long: Optional[int] + chg_noncomm_short: Optional[int] + + +class ComparePoint(BaseModel): + report_date: str + value: Optional[float] + + +class CompareResponse(BaseModel): + metric: str + commodities: list[CommodityMeta] + series: dict[str, list[ComparePoint]] + + +class PercentileResponse(BaseModel): + cftc_code: str + commodity: str + current_net: Optional[int] + percentile: Optional[float] + z_score: Optional[float] + lookback_weeks: int + period_min: Optional[int] + period_max: Optional[int] + + +class ReportDateInfo(BaseModel): + date: str + commodity_count: int + + +class ReportSnapshotRow(BaseModel): + cftc_code: str + commodity: str + exchange: str + open_interest: Optional[int] + noncomm_net: Optional[int] + comm_net: Optional[int] + pct_noncomm_long: Optional[float] + pct_noncomm_short: Optional[float] + traders_total: Optional[int] diff --git a/app/api/routes/__init__.py b/app/api/routes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/api/routes/analytics.py b/app/api/routes/analytics.py new file mode 100644 
index 0000000..604119d --- /dev/null +++ b/app/api/routes/analytics.py @@ -0,0 +1,195 @@ +from fastapi import APIRouter, HTTPException, Query +from typing import Optional + +from app.db import get_db +from app.api.models import PercentileResponse, ScreenerRow + +router = APIRouter(prefix="/api/analytics", tags=["analytics"]) + + +@router.get("/screener", response_model=list[ScreenerRow]) +def screener( + exchange: Optional[str] = Query(None), + lookback_weeks: int = Query(156, ge=4, le=1560), + top_n: int = Query(50, ge=1, le=500), + direction: Optional[str] = Query(None, pattern="^(long|short)$"), +): + """ + Return markets ranked by their current non-commercial net position + relative to the historical distribution (percentile rank). + """ + with get_db() as conn: + # Get all commodities, optionally filtered by exchange + exchange_filter = "AND c.exchange_abbr = ?" if exchange else "" + exchange_params = [exchange] if exchange else [] + + # For each commodity: get latest date, latest noncomm_net, + # and compute percentile rank over last N weeks + rows = conn.execute( + f""" + WITH latest AS ( + SELECT c.cftc_code, c.name AS commodity, c.exchange_abbr AS exchange, + MAX(r.report_date) AS latest_date + FROM commodities c + JOIN reports r ON r.commodity_id = c.id + {exchange_filter} + GROUP BY c.cftc_code + ), + latest_pos AS ( + SELECT l.cftc_code, l.commodity, l.exchange, l.latest_date, + p.open_interest, + (p.noncomm_long - p.noncomm_short) AS noncomm_net, + p.chg_noncomm_long, p.chg_noncomm_short + FROM latest l + JOIN commodities c ON c.cftc_code = l.cftc_code + JOIN reports r ON r.commodity_id = c.id AND r.report_date = l.latest_date + JOIN positions p ON p.report_id = r.id AND p.row_type = 'All' + ), + lookback AS ( + SELECT c.cftc_code, + (p.noncomm_long - p.noncomm_short) AS net, + ROW_NUMBER() OVER (PARTITION BY c.cftc_code ORDER BY r.report_date DESC) AS rn + FROM commodities c + JOIN reports r ON r.commodity_id = c.id + JOIN positions p ON p.report_id = 
r.id AND p.row_type = 'All' + ), + pct AS ( + SELECT lp.cftc_code, + lp.commodity, + lp.exchange, + lp.latest_date, + lp.open_interest, + lp.noncomm_net, + lp.chg_noncomm_long, + lp.chg_noncomm_short, + CAST( + (SELECT COUNT(*) FROM lookback lb2 + WHERE lb2.cftc_code = lp.cftc_code + AND lb2.rn <= ? AND lb2.net < lp.noncomm_net) + AS REAL + ) / NULLIF( + (SELECT COUNT(*) FROM lookback lb3 + WHERE lb3.cftc_code = lp.cftc_code AND lb3.rn <= ?), + 0 + ) * 100.0 AS pct_rank + FROM latest_pos lp + ) + SELECT cftc_code, commodity, exchange, latest_date, + noncomm_net, open_interest, pct_rank, + chg_noncomm_long, chg_noncomm_short + FROM pct + ORDER BY pct_rank DESC + LIMIT ? + """, + exchange_params + [lookback_weeks, lookback_weeks, top_n], + ).fetchall() + + result = [ScreenerRow(**dict(r)) for r in rows] + if direction == 'long': + result = [r for r in result if r.pct_rank is not None and r.pct_rank >= 50] + elif direction == 'short': + result = [r for r in result if r.pct_rank is not None and r.pct_rank < 50] + return result + + +@router.get("/{cftc_code}/net-position-percentile", response_model=PercentileResponse) +def net_position_percentile( + cftc_code: str, + lookback_weeks: int = Query(156, ge=4, le=1560), +): + """ + Where does the current non-commercial net position sit in the + historical distribution over the last N weeks? + """ + with get_db() as conn: + row = conn.execute( + "SELECT id, name FROM commodities WHERE cftc_code = ?", (cftc_code,) + ).fetchone() + if not row: + raise HTTPException(status_code=404, detail=f"Commodity {cftc_code} not found") + commodity_name = row['name'] + + # Get last N weekly net positions + history = conn.execute( + """ + SELECT (p.noncomm_long - p.noncomm_short) AS net + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND p.row_type = 'All' + ORDER BY r.report_date DESC + LIMIT ? 
+ """, + (cftc_code, lookback_weeks), + ).fetchall() + + if not history: + raise HTTPException(status_code=404, detail="No position data found") + + nets = [r[0] for r in history if r[0] is not None] + if not nets: + return PercentileResponse( + cftc_code=cftc_code, commodity=commodity_name, + current_net=None, percentile=None, z_score=None, + lookback_weeks=lookback_weeks, period_min=None, period_max=None, + ) + + current = nets[0] + n = len(nets) + below = sum(1 for v in nets[1:] if v < current) + percentile = round(below / max(n - 1, 1) * 100, 1) + + mean = sum(nets) / n + variance = sum((v - mean) ** 2 for v in nets) / n + std = variance ** 0.5 + z_score = round((current - mean) / std, 2) if std > 0 else 0.0 + + return PercentileResponse( + cftc_code=cftc_code, + commodity=commodity_name, + current_net=current, + percentile=percentile, + z_score=z_score, + lookback_weeks=n, + period_min=min(nets), + period_max=max(nets), + ) + + +@router.get("/{cftc_code}/concentration") +def get_concentration( + cftc_code: str, + from_date: Optional[str] = Query(None), + to_date: Optional[str] = Query(None), + row_type: str = Query("All", pattern="^(All|Old|Other)$"), +): + with get_db() as conn: + row = conn.execute( + "SELECT id FROM commodities WHERE cftc_code = ?", (cftc_code,) + ).fetchone() + if not row: + raise HTTPException(status_code=404, detail=f"Commodity {cftc_code} not found") + + sql = """ + SELECT r.report_date, + cn.conc_gross_long_4, cn.conc_gross_short_4, + cn.conc_gross_long_8, cn.conc_gross_short_8, + cn.conc_net_long_4, cn.conc_net_short_4, + cn.conc_net_long_8, cn.conc_net_short_8 + FROM concentration cn + JOIN reports r ON r.id = cn.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND cn.row_type = ? + """ + params: list = [cftc_code, row_type] + if from_date: + sql += " AND r.report_date >= ?" + params.append(from_date) + if to_date: + sql += " AND r.report_date <= ?" 
+ params.append(to_date) + sql += " ORDER BY r.report_date ASC" + + rows = conn.execute(sql, params).fetchall() + + return {"cftc_code": cftc_code, "row_type": row_type, "data": [dict(r) for r in rows]} diff --git a/app/api/routes/commodities.py b/app/api/routes/commodities.py new file mode 100644 index 0000000..8142b01 --- /dev/null +++ b/app/api/routes/commodities.py @@ -0,0 +1,64 @@ +from fastapi import APIRouter, HTTPException, Query +from typing import Optional + +from app.db import get_db +from app.api.models import CommodityMeta, ExchangeInfo + +router = APIRouter(prefix="/api", tags=["commodities"]) + + +@router.get("/exchanges", response_model=list[ExchangeInfo]) +def get_exchanges(): + with get_db() as conn: + rows = conn.execute( + """ + SELECT exchange_abbr, exchange, + COUNT(*) AS commodity_count + FROM commodities + GROUP BY exchange_abbr + ORDER BY commodity_count DESC + """ + ).fetchall() + return [ExchangeInfo(**dict(r)) for r in rows] + + +@router.get("/commodities", response_model=list[CommodityMeta]) +def get_commodities(exchange: Optional[str] = Query(None)): + sql = """ + SELECT c.cftc_code, c.name, c.exchange, c.exchange_abbr, c.contract_unit, + MIN(r.report_date) AS first_date, + MAX(r.report_date) AS last_date, + COUNT(DISTINCT r.report_date) AS week_count + FROM commodities c + LEFT JOIN reports r ON r.commodity_id = c.id + """ + params = [] + if exchange: + sql += " WHERE c.exchange_abbr = ?" 
+ params.append(exchange) + sql += " GROUP BY c.id ORDER BY c.exchange_abbr, c.name" + + with get_db() as conn: + rows = conn.execute(sql, params).fetchall() + return [CommodityMeta(**dict(r)) for r in rows] + + +@router.get("/commodities/{cftc_code}", response_model=CommodityMeta) +def get_commodity(cftc_code: str): + with get_db() as conn: + row = conn.execute( + """ + SELECT c.cftc_code, c.name, c.exchange, c.exchange_abbr, c.contract_unit, + MIN(r.report_date) AS first_date, + MAX(r.report_date) AS last_date, + COUNT(DISTINCT r.report_date) AS week_count + FROM commodities c + LEFT JOIN reports r ON r.commodity_id = c.id + WHERE c.cftc_code = ? + GROUP BY c.id + """, + (cftc_code,), + ).fetchone() + if not row: + raise HTTPException(status_code=404, detail=f"Commodity {cftc_code} not found") + return CommodityMeta(**dict(row)) diff --git a/app/api/routes/positions.py b/app/api/routes/positions.py new file mode 100644 index 0000000..e6b4e2e --- /dev/null +++ b/app/api/routes/positions.py @@ -0,0 +1,299 @@ +from fastapi import APIRouter, HTTPException, Query +from typing import Optional + +from app.db import get_db +from app.api.models import ( + CommodityMeta, HistoryResponse, LatestResponse, + LatestRowData, PositionPoint, ExtremesResponse, CompareResponse, ComparePoint, +) + +router = APIRouter(prefix="/api/positions", tags=["positions"]) + + +def _commodity_meta(conn, cftc_code: str) -> dict: + row = conn.execute( + """ + SELECT c.cftc_code, c.name, c.exchange, c.exchange_abbr, c.contract_unit, + MIN(r.report_date) AS first_date, + MAX(r.report_date) AS last_date, + COUNT(DISTINCT r.report_date) AS week_count + FROM commodities c + LEFT JOIN reports r ON r.commodity_id = c.id + WHERE c.cftc_code = ? 
+ GROUP BY c.id + """, + (cftc_code,), + ).fetchone() + if not row: + raise HTTPException(status_code=404, detail=f"Commodity {cftc_code} not found") + return dict(row) + + +def _row_to_point(row) -> PositionPoint: + d = dict(row) + d['noncomm_net'] = ( + (d['noncomm_long'] or 0) - (d['noncomm_short'] or 0) + if d.get('noncomm_long') is not None and d.get('noncomm_short') is not None + else None + ) + d['comm_net'] = ( + (d['comm_long'] or 0) - (d['comm_short'] or 0) + if d.get('comm_long') is not None and d.get('comm_short') is not None + else None + ) + d['nonrept_net'] = ( + (d.get('nonrept_long') or 0) - (d.get('nonrept_short') or 0) + if d.get('nonrept_long') is not None and d.get('nonrept_short') is not None + else None + ) + return PositionPoint(**{k: d.get(k) for k in PositionPoint.model_fields}) + + +@router.get("/{cftc_code}/history", response_model=HistoryResponse) +def get_history( + cftc_code: str, + from_date: Optional[str] = Query(None), + to_date: Optional[str] = Query(None), + row_type: str = Query("All", pattern="^(All|Old|Other)$"), +): + with get_db() as conn: + meta = _commodity_meta(conn, cftc_code) + + sql = """ + SELECT r.report_date, + p.open_interest, + p.noncomm_long, p.noncomm_short, p.noncomm_spreading, + p.comm_long, p.comm_short, + p.nonrept_long, p.nonrept_short, + p.chg_open_interest, p.chg_noncomm_long, p.chg_noncomm_short, + p.chg_comm_long, p.chg_comm_short, + p.pct_noncomm_long, p.pct_noncomm_short, + p.pct_comm_long, p.pct_comm_short, + p.traders_total, p.traders_noncomm_long, p.traders_noncomm_short, + p.traders_comm_long, p.traders_comm_short + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND p.row_type = ? + """ + params: list = [cftc_code, row_type] + if from_date: + sql += " AND r.report_date >= ?" + params.append(from_date) + if to_date: + sql += " AND r.report_date <= ?" 
+ params.append(to_date) + sql += " ORDER BY r.report_date ASC" + + rows = conn.execute(sql, params).fetchall() + + data = [_row_to_point(r) for r in rows] + return HistoryResponse( + commodity=CommodityMeta(**meta), + row_type=row_type, + data=data, + ) + + +@router.get("/compare", response_model=CompareResponse) +def compare( + codes: str = Query(..., description="Comma-separated CFTC codes, max 8"), + metric: str = Query("noncomm_net"), + from_date: Optional[str] = Query(None), + to_date: Optional[str] = Query(None), + row_type: str = Query("All", pattern="^(All|Old|Other)$"), +): + code_list = [c.strip() for c in codes.split(",")][:8] + + COMPUTED = {"noncomm_net", "comm_net", "nonrept_net"} + DB_FIELDS = { + "open_interest", "noncomm_long", "noncomm_short", "noncomm_spreading", + "comm_long", "comm_short", "nonrept_long", "nonrept_short", + "pct_noncomm_long", "pct_noncomm_short", "pct_comm_long", "pct_comm_short", + "traders_total", + } + + if metric not in COMPUTED and metric not in DB_FIELDS: + raise HTTPException(status_code=400, detail=f"Unknown metric: {metric}") + + commodities = [] + series: dict[str, list[ComparePoint]] = {} + + with get_db() as conn: + for code in code_list: + try: + meta = _commodity_meta(conn, code) + commodities.append(CommodityMeta(**meta)) + except HTTPException: + continue + + if metric == "noncomm_net": + select_expr = "(p.noncomm_long - p.noncomm_short)" + elif metric == "comm_net": + select_expr = "(p.comm_long - p.comm_short)" + elif metric == "nonrept_net": + select_expr = "(p.nonrept_long - p.nonrept_short)" + else: + select_expr = f"p.{metric}" + + sql = f""" + SELECT r.report_date, {select_expr} AS value + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND p.row_type = ? + """ + params: list = [code, row_type] + if from_date: + sql += " AND r.report_date >= ?" + params.append(from_date) + if to_date: + sql += " AND r.report_date <= ?" 
+ params.append(to_date) + sql += " ORDER BY r.report_date ASC" + + rows = conn.execute(sql, params).fetchall() + series[code] = [ComparePoint(report_date=r[0], value=r[1]) for r in rows] + + return CompareResponse(metric=metric, commodities=commodities, series=series) + + +@router.get("/{cftc_code}/latest", response_model=LatestResponse) +def get_latest(cftc_code: str): + with get_db() as conn: + meta = _commodity_meta(conn, cftc_code) + + latest_date = conn.execute( + """ + SELECT MAX(r.report_date) + FROM reports r + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? + """, + (cftc_code,), + ).fetchone()[0] + + if not latest_date: + raise HTTPException(status_code=404, detail="No data for this commodity") + + pos_rows = conn.execute( + """ + SELECT r.report_date, p.row_type, + p.open_interest, p.noncomm_long, p.noncomm_short, p.noncomm_spreading, + p.comm_long, p.comm_short, p.nonrept_long, p.nonrept_short, + p.chg_open_interest, p.chg_noncomm_long, p.chg_noncomm_short, + p.chg_comm_long, p.chg_comm_short, + p.pct_noncomm_long, p.pct_noncomm_short, + p.pct_comm_long, p.pct_comm_short, + p.traders_total, p.traders_noncomm_long, p.traders_noncomm_short, + p.traders_comm_long, p.traders_comm_short + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND r.report_date = ? + ORDER BY p.row_type + """, + (cftc_code, latest_date), + ).fetchall() + + conc_rows = conn.execute( + """ + SELECT cn.row_type, + cn.conc_gross_long_4, cn.conc_gross_short_4, + cn.conc_gross_long_8, cn.conc_gross_short_8, + cn.conc_net_long_4, cn.conc_net_short_4, + cn.conc_net_long_8, cn.conc_net_short_8 + FROM concentration cn + JOIN reports r ON r.id = cn.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND r.report_date = ? 
+ """, + (cftc_code, latest_date), + ).fetchall() + + conc_by_type = {r['row_type']: dict(r) for r in conc_rows} + + result_rows = [] + for row in pos_rows: + rt = row['row_type'] + point = _row_to_point(row) + result_rows.append(LatestRowData( + row_type=rt, + positions=point, + concentration=conc_by_type.get(rt), + )) + + return LatestResponse( + commodity=CommodityMeta(**meta), + report_date=latest_date, + rows=result_rows, + ) + + +@router.get("/{cftc_code}/extremes", response_model=ExtremesResponse) +def get_extremes(cftc_code: str): + with get_db() as conn: + meta = _commodity_meta(conn, cftc_code) + + def minmax(col: str): + # col is a bare column name on the positions table + r = conn.execute( + f""" + SELECT + MAX(p.{col}) AS max_val, + MIN(p.{col}) AS min_val, + (SELECT r2.report_date FROM positions p2 + JOIN reports r2 ON r2.id = p2.report_id + JOIN commodities c2 ON c2.id = r2.commodity_id + WHERE c2.cftc_code = ? AND p2.row_type = 'All' + ORDER BY p2.{col} DESC LIMIT 1) AS max_date, + (SELECT r2.report_date FROM positions p2 + JOIN reports r2 ON r2.id = p2.report_id + JOIN commodities c2 ON c2.id = r2.commodity_id + WHERE c2.cftc_code = ? AND p2.row_type = 'All' + ORDER BY p2.{col} ASC LIMIT 1) AS min_date + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? AND p.row_type = 'All' + """, + (cftc_code, cftc_code, cftc_code), + ).fetchone() + return { + "max": {"value": r[0], "date": r[2]}, + "min": {"value": r[1], "date": r[3]}, + } + + oi = minmax("open_interest") + + # For net positions, pull the full series and compute in Python + def net_minmax(long_col: str, short_col: str): + rows2 = conn.execute( + f""" + SELECT r.report_date, p.{long_col}, p.{short_col} + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE c.cftc_code = ? 
AND p.row_type = 'All' + AND p.{long_col} IS NOT NULL AND p.{short_col} IS NOT NULL + ORDER BY (p.{long_col} - p.{short_col}) DESC + """, + (cftc_code,), + ).fetchall() + if not rows2: + return {"max": {"value": None, "date": None}, "min": {"value": None, "date": None}} + return { + "max": {"value": rows2[0][1] - rows2[0][2], "date": rows2[0][0]}, + "min": {"value": rows2[-1][1] - rows2[-1][2], "date": rows2[-1][0]}, + } + + nc_net = net_minmax("noncomm_long", "noncomm_short") + cm_net = net_minmax("comm_long", "comm_short") + + return ExtremesResponse( + cftc_code=cftc_code, + commodity=meta['name'], + noncomm_net=nc_net, + open_interest=oi, + comm_net=cm_net, + ) diff --git a/app/api/routes/reports.py b/app/api/routes/reports.py new file mode 100644 index 0000000..cc9e753 --- /dev/null +++ b/app/api/routes/reports.py @@ -0,0 +1,62 @@ +from fastapi import APIRouter, HTTPException, Query +from typing import Optional + +from app.db import get_db +from app.api.models import ReportDateInfo, ReportSnapshotRow + +router = APIRouter(prefix="/api/reports", tags=["reports"]) + + +@router.get("/dates", response_model=list[ReportDateInfo]) +def get_report_dates(exchange: Optional[str] = Query(None)): + with get_db() as conn: + if exchange: + rows = conn.execute( + """ + SELECT r.report_date AS date, COUNT(DISTINCT r.commodity_id) AS commodity_count + FROM reports r + JOIN commodities c ON c.id = r.commodity_id + WHERE c.exchange_abbr = ? 
+ GROUP BY r.report_date + ORDER BY r.report_date DESC + """, + (exchange,), + ).fetchall() + else: + rows = conn.execute( + """ + SELECT report_date AS date, COUNT(DISTINCT commodity_id) AS commodity_count + FROM reports + GROUP BY report_date + ORDER BY report_date DESC + """ + ).fetchall() + return [ReportDateInfo(**dict(r)) for r in rows] + + +@router.get("/{date}", response_model=list[ReportSnapshotRow]) +def get_report_snapshot(date: str, exchange: Optional[str] = Query(None)): + with get_db() as conn: + sql = """ + SELECT c.cftc_code, c.name AS commodity, c.exchange_abbr AS exchange, + p.open_interest, + (p.noncomm_long - p.noncomm_short) AS noncomm_net, + (p.comm_long - p.comm_short) AS comm_net, + p.pct_noncomm_long, p.pct_noncomm_short, + p.traders_total + FROM positions p + JOIN reports r ON r.id = p.report_id + JOIN commodities c ON c.id = r.commodity_id + WHERE r.report_date = ? AND p.row_type = 'All' + """ + params: list = [date] + if exchange: + sql += " AND c.exchange_abbr = ?" 
+ params.append(exchange) + sql += " ORDER BY c.exchange_abbr, c.name" + + rows = conn.execute(sql, params).fetchall() + + if not rows: + raise HTTPException(status_code=404, detail=f"No data for date {date}") + return [ReportSnapshotRow(**dict(r)) for r in rows] diff --git a/app/db.py b/app/db.py new file mode 100644 index 0000000..c671634 --- /dev/null +++ b/app/db.py @@ -0,0 +1,26 @@ +import os +import sqlite3 +from contextlib import contextmanager +from pathlib import Path + +DB_PATH = Path(os.environ.get("DB_PATH", Path(__file__).parent.parent / "data" / "cot.db")) + + +@contextmanager +def get_db(): + conn = sqlite3.connect(str(DB_PATH), check_same_thread=False) + conn.row_factory = sqlite3.Row + conn.execute("PRAGMA journal_mode=WAL") + conn.execute("PRAGMA foreign_keys=ON") + try: + yield conn + finally: + conn.close() + + +def init_db(): + schema_path = Path(__file__).parent.parent / "schema.sql" + with get_db() as conn: + conn.executescript(schema_path.read_text()) + conn.commit() + print(f"Database initialized at {DB_PATH}") diff --git a/app/ingestion/__init__.py b/app/ingestion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/ingestion/cli.py b/app/ingestion/cli.py new file mode 100644 index 0000000..fc042c9 --- /dev/null +++ b/app/ingestion/cli.py @@ -0,0 +1,143 @@ +""" +CFTC COT Ingestion CLI + +Usage: + python -m app.ingestion.cli init-db + python -m app.ingestion.cli import-local-html [--data-dir ./data] + python -m app.ingestion.cli import-history [--start-year 1995] [--end-year 2026] + python -m app.ingestion.cli import-html + python -m app.ingestion.cli import-zip + python -m app.ingestion.cli download-and-import + python -m app.ingestion.cli status +""" + +import argparse +import sys +from pathlib import Path + + +def cmd_init_db(args): + from app.db import init_db + init_db() + + +def cmd_import_local_html(args): + from app.ingestion.importer import import_html_file + data_dir = Path(args.data_dir) + files = 
sorted(data_dir.glob("*_deacbtlof.htm")) + if not files: + print(f"No HTML files found in {data_dir}") + return + total_inserted = total_skipped = 0 + for f in files: + result = import_html_file(str(f)) + status = f"ERROR: {result.error}" if result.error else "OK" + print(f" {f.name}: {result.rows_inserted} inserted, {result.rows_skipped} skipped — {status}") + total_inserted += result.rows_inserted + total_skipped += result.rows_skipped + print(f"\nTotal: {total_inserted} inserted, {total_skipped} skipped") + + +def cmd_import_history(args): + from app.ingestion.importer import run_historical_import + print(f"Importing historical data {args.start_year}–{args.end_year}...") + run_historical_import(start_year=args.start_year, end_year=args.end_year, verbose=True) + print("Done.") + + +def cmd_import_html(args): + from app.ingestion.importer import import_html_file + result = import_html_file(args.file) + if result.error: + print(f"Error: {result.error}", file=sys.stderr) + sys.exit(1) + print(f"{result.rows_inserted} inserted, {result.rows_skipped} skipped") + + +def cmd_import_zip(args): + from app.ingestion.importer import import_zip_file + result = import_zip_file(args.file) + if result.error: + print(f"Error: {result.error}", file=sys.stderr) + sys.exit(1) + print(f"{result.rows_inserted} inserted, {result.rows_skipped} skipped") + + +def cmd_download_and_import(args): + from app.ingestion.importer import download_and_import + result = download_and_import() + if result.error: + print(f"Error: {result.error}", file=sys.stderr) + sys.exit(1) + print(f"Source: {result.source}") + print(f"{result.rows_inserted} inserted, {result.rows_skipped} skipped") + + +def cmd_status(args): + from app.db import get_db + with get_db() as conn: + # Summary counts + row = conn.execute("SELECT COUNT(*) FROM commodities").fetchone() + print(f"Commodities: {row[0]}") + row = conn.execute("SELECT COUNT(DISTINCT report_date) FROM reports").fetchone() + print(f"Report dates: {row[0]}") 
+ row = conn.execute("SELECT COUNT(*) FROM positions").fetchone() + print(f"Position rows: {row[0]}") + row = conn.execute("SELECT MIN(report_date), MAX(report_date) FROM reports").fetchone() + print(f"Date range: {row[0]} to {row[1]}") + + # Exchanges + print("\nBy exchange:") + for r in conn.execute( + "SELECT exchange_abbr, COUNT(*) FROM commodities GROUP BY exchange_abbr ORDER BY COUNT(*) DESC" + ): + print(f" {r[0]}: {r[1]} markets") + + # Import log + print("\nImport log (last 10):") + for r in conn.execute( + "SELECT source, status, rows_inserted, rows_skipped, completed_at " + "FROM import_log ORDER BY id DESC LIMIT 10" + ): + print(f" {r['source']}: {r['status']} — " + f"{r['rows_inserted']} inserted, {r['rows_skipped']} skipped " + f"({r['completed_at']})") + + +def main(): + parser = argparse.ArgumentParser(description="CFTC COT data ingestion CLI") + sub = parser.add_subparsers(dest="command", required=True) + + sub.add_parser("init-db", help="Initialize the database schema") + + p = sub.add_parser("import-local-html", help="Import all local HTML files") + p.add_argument("--data-dir", default="data", help="Directory with HTML files") + + p = sub.add_parser("import-history", help="Download and import full historical archive") + p.add_argument("--start-year", type=int, default=1995) + p.add_argument("--end-year", type=int, default=2026) + + p = sub.add_parser("import-html", help="Import a single HTML file") + p.add_argument("file") + + p = sub.add_parser("import-zip", help="Import a single ZIP file") + p.add_argument("file") + + sub.add_parser("download-and-import", help="Download latest weekly report and import it") + sub.add_parser("status", help="Show database statistics") + + args = parser.parse_args() + commands = { + "init-db": cmd_init_db, + "import-local-html": cmd_import_local_html, + "import-history": cmd_import_history, + "import-html": cmd_import_html, + "import-zip": cmd_import_zip, + "download-and-import": cmd_download_and_import, + 
"status": cmd_status, + } + commands[args.command](args) + + +if __name__ == "__main__": + main() diff --git a/app/ingestion/importer.py b/app/ingestion/importer.py new file mode 100644 index 0000000..c40cd10 --- /dev/null +++ b/app/ingestion/importer.py @@ -0,0 +1,419 @@ +""" +CFTC COT Data Importer + +Inserts parsed CommodityBlock objects into the SQLite database. +All inserts use INSERT OR IGNORE for idempotency — safe to re-run. +""" + +import sqlite3 +import tempfile +from dataclasses import dataclass +from pathlib import Path +from typing import Optional + +import requests + +from app.db import get_db +from app.ingestion.parser import ( + CommodityBlock, + parse_html_file, + parse_zip_file, +) + +HEADERS = { + "User-Agent": ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " + "AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/120.0.0.0 Safari/537.36" + ) +} + +HISTORICAL_BASE = "https://www.cftc.gov/files/dea/history" +WEEKLY_URL = "https://www.cftc.gov/dea/options/deacbtlof.htm" + + +@dataclass +class ImportResult: + source: str + rows_inserted: int = 0 + rows_skipped: int = 0 + error: Optional[str] = None + + +def _upsert_commodity(conn: sqlite3.Connection, block: CommodityBlock) -> int: + """Insert commodity if not exists; return its id.""" + conn.execute( + """ + INSERT OR IGNORE INTO commodities + (cftc_code, name, exchange, exchange_abbr, contract_unit) + VALUES (?, ?, ?, ?, ?) + """, + (block.cftc_code, block.name, block.exchange, + block.exchange_abbr, block.contract_unit), + ) + row = conn.execute( + "SELECT id FROM commodities WHERE cftc_code = ?", (block.cftc_code,) + ).fetchone() + return row[0] + + +def _upsert_report(conn: sqlite3.Connection, commodity_id: int, + block: CommodityBlock, source_file: str) -> Optional[int]: + """ + Insert report row. Returns report id, or None if already exists + (i.e. this report date was already imported for this commodity). 
+ """ + cur = conn.execute( + """ + INSERT OR IGNORE INTO reports + (commodity_id, report_date, prev_report_date, source_file) + VALUES (?, ?, ?, ?) + """, + (commodity_id, block.report_date, block.prev_report_date, source_file), + ) + if cur.rowcount == 0: + return None # already existed + return cur.lastrowid + + +def import_commodity_block(conn: sqlite3.Connection, block: CommodityBlock, + source: str) -> tuple[int, int]: + """ + Insert one CommodityBlock into the DB. + Returns (rows_inserted, rows_skipped). + """ + commodity_id = _upsert_commodity(conn, block) + report_id = _upsert_report(conn, commodity_id, block, source) + + if report_id is None: + return 0, 1 # already imported + + inserted = 0 + for row_type in ('All', 'Old', 'Other'): + pos = block.positions.get(row_type) + if pos is None: + continue + + chg = block.changes if row_type == 'All' else None + pct = block.percentages.get(row_type) + trd = block.traders.get(row_type) + + conn.execute( + """ + INSERT OR IGNORE INTO positions ( + report_id, row_type, + open_interest, + noncomm_long, noncomm_short, noncomm_spreading, + comm_long, comm_short, + total_long, total_short, + nonrept_long, nonrept_short, + chg_open_interest, + chg_noncomm_long, chg_noncomm_short, chg_noncomm_spreading, + chg_comm_long, chg_comm_short, + chg_total_long, chg_total_short, + chg_nonrept_long, chg_nonrept_short, + pct_open_interest, + pct_noncomm_long, pct_noncomm_short, pct_noncomm_spreading, + pct_comm_long, pct_comm_short, + pct_total_long, pct_total_short, + pct_nonrept_long, pct_nonrept_short, + traders_total, + traders_noncomm_long, traders_noncomm_short, traders_noncomm_spread, + traders_comm_long, traders_comm_short, + traders_total_long, traders_total_short + ) VALUES ( + ?, ?, + ?, + ?, ?, ?, + ?, ?, + ?, ?, + ?, ?, + ?, + ?, ?, ?, + ?, ?, + ?, ?, + ?, ?, + ?, + ?, ?, ?, + ?, ?, + ?, ?, + ?, ?, + ?, + ?, ?, ?, + ?, ?, + ?, ? 
+ ) + """, + ( + report_id, row_type, + pos.open_interest, + pos.noncomm_long, pos.noncomm_short, pos.noncomm_spreading, + pos.comm_long, pos.comm_short, + pos.total_long, pos.total_short, + pos.nonrept_long, pos.nonrept_short, + chg.chg_open_interest if chg else None, + chg.chg_noncomm_long if chg else None, + chg.chg_noncomm_short if chg else None, + chg.chg_noncomm_spreading if chg else None, + chg.chg_comm_long if chg else None, + chg.chg_comm_short if chg else None, + chg.chg_total_long if chg else None, + chg.chg_total_short if chg else None, + chg.chg_nonrept_long if chg else None, + chg.chg_nonrept_short if chg else None, + pct.pct_open_interest if pct else None, + pct.pct_noncomm_long if pct else None, + pct.pct_noncomm_short if pct else None, + pct.pct_noncomm_spreading if pct else None, + pct.pct_comm_long if pct else None, + pct.pct_comm_short if pct else None, + pct.pct_total_long if pct else None, + pct.pct_total_short if pct else None, + pct.pct_nonrept_long if pct else None, + pct.pct_nonrept_short if pct else None, + trd.traders_total if trd else None, + trd.traders_noncomm_long if trd else None, + trd.traders_noncomm_short if trd else None, + trd.traders_noncomm_spread if trd else None, + trd.traders_comm_long if trd else None, + trd.traders_comm_short if trd else None, + trd.traders_total_long if trd else None, + trd.traders_total_short if trd else None, + ), + ) + inserted += 1 + + # Concentration + conc = block.concentration.get(row_type) + if conc: + conn.execute( + """ + INSERT OR IGNORE INTO concentration ( + report_id, row_type, + conc_gross_long_4, conc_gross_short_4, + conc_gross_long_8, conc_gross_short_8, + conc_net_long_4, conc_net_short_4, + conc_net_long_8, conc_net_short_8 + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ """, + ( + report_id, row_type, + conc.conc_gross_long_4, conc.conc_gross_short_4, + conc.conc_gross_long_8, conc.conc_gross_short_8, + conc.conc_net_long_4, conc.conc_net_short_4, + conc.conc_net_long_8, conc.conc_net_short_8, + ), + ) + + return inserted, 0 + + +def import_html_file(html_path: str) -> ImportResult: + """Import a single weekly HTML file.""" + source = Path(html_path).name + result = ImportResult(source=source) + with get_db() as conn: + if _already_imported(conn, source): + result.rows_skipped = 1 + return result + _log_start(conn, source, 'weekly_html') + try: + with get_db() as conn: + for block in parse_html_file(html_path): + ins, skp = import_commodity_block(conn, block, source) + result.rows_inserted += ins + result.rows_skipped += skp + conn.commit() + except Exception as e: + result.error = str(e) + with get_db() as conn: + _log_done(conn, source, result.rows_inserted, result.rows_skipped, result.error) + return result + + +def import_zip_file(zip_path: str, source_label: Optional[str] = None) -> ImportResult: + """Import a historical ZIP file.""" + source = source_label or Path(zip_path).name + result = ImportResult(source=source) + try: + with get_db() as conn: + for block in parse_zip_file(zip_path): + ins, skp = import_commodity_block(conn, block, source) + result.rows_inserted += ins + result.rows_skipped += skp + conn.commit() + except Exception as e: + result.error = str(e) + return result + + +def _download_zip(url: str, dest: Path) -> bool: + """Download a ZIP file, return True on success.""" + try: + r = requests.get(url, headers=HEADERS, timeout=120, stream=True) + r.raise_for_status() + dest.write_bytes(r.content) + return True + except requests.RequestException: + return False + + +def _log_start(conn: sqlite3.Connection, source: str, source_type: str) -> None: + conn.execute( + """ + INSERT OR REPLACE INTO import_log (source, source_type, status, started_at) + VALUES (?, ?, 'running', datetime('now')) + """, + (source, 
source_type), + ) + conn.commit() + + +def _log_done(conn: sqlite3.Connection, source: str, + inserted: int, skipped: int, error: Optional[str] = None) -> None: + status = 'error' if error else 'done' + conn.execute( + """ + UPDATE import_log + SET status = ?, rows_inserted = ?, rows_skipped = ?, + completed_at = datetime('now'), error_message = ? + WHERE source = ? + """, + (status, inserted, skipped, error, source), + ) + conn.commit() + + +def _already_imported(conn: sqlite3.Connection, source: str) -> bool: + row = conn.execute( + "SELECT status FROM import_log WHERE source = ?", (source,) + ).fetchone() + return row is not None and row[0] == 'done' + + +def run_historical_import(start_year: int = 1995, end_year: int = 2026, + verbose: bool = True) -> None: + """ + Download and import the full historical archive. + Uses import_log to skip already-completed sources. + """ + sources = [] + + # Combined 1995-2016 archive + sources.append(( + f"{HISTORICAL_BASE}/deahistfo_1995_2016.zip", + "deahistfo_1995_2016.zip", + "historical_zip", + )) + + # Per-year archives from 2017 onwards + for year in range(max(start_year, 2017), end_year + 1): + sources.append(( + f"{HISTORICAL_BASE}/deahistfo{year}.zip", + f"deahistfo{year}.zip", + "annual_zip", + )) + + with get_db() as conn: + pass # just to verify DB is accessible + + for url, label, source_type in sources: + with get_db() as conn: + if _already_imported(conn, label): + if verbose: + print(f" [skip] {label} (already imported)") + continue + + if verbose: + print(f" [download] {label} ...") + + with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as tmp: + tmp_path = Path(tmp.name) + + if not _download_zip(url, tmp_path): + if verbose: + print(f" [error] Failed to download {url}") + with get_db() as conn: + _log_start(conn, label, source_type) + _log_done(conn, label, 0, 0, f"Download failed: {url}") + tmp_path.unlink(missing_ok=True) + continue + + if verbose: + print(f" [import] {label} ...") + + with 
get_db() as conn: + _log_start(conn, label, source_type) + + result = import_zip_file(str(tmp_path), source_label=label) + tmp_path.unlink(missing_ok=True) + + with get_db() as conn: + _log_done(conn, label, result.rows_inserted, result.rows_skipped, result.error) + + if verbose: + status = "ERROR: " + result.error if result.error else "OK" + print(f" [done] {label}: {result.rows_inserted} inserted, " + f"{result.rows_skipped} skipped — {status}") + + +def download_and_import() -> ImportResult: + """ + Download the current weekly report and import it. + Used by the cron job. + """ + import re + from datetime import datetime + + result = ImportResult(source="weekly") + try: + r = requests.get(WEEKLY_URL, headers=HEADERS, timeout=30) + r.raise_for_status() + html = r.content.decode('latin-1') + except requests.RequestException as e: + result.error = str(e) + return result + + # Extract report date for filename + m = re.search( + r'(January|February|March|April|May|June|July|August|September|' + r'October|November|December)\s+(\d{1,2}),\s+(\d{4})', + html, + ) + if not m: + result.error = "Could not extract report date" + return result + + month, day, year = m.groups() + months = ['January','February','March','April','May','June', + 'July','August','September','October','November','December'] + from datetime import date as dt + report_date = dt(int(year), months.index(month) + 1, int(day)).isoformat() + + from app.db import DB_PATH + data_dir = DB_PATH.parent + data_dir.mkdir(exist_ok=True) + filename = f"{report_date}_deacbtlof.htm" + filepath = data_dir / filename + + if not filepath.exists(): + filepath.write_text(html, encoding='latin-1') + + result.source = filename + with get_db() as conn: + if _already_imported(conn, filename): + result.rows_skipped = 1 + return result + + with get_db() as conn: + _log_start(conn, filename, 'weekly_html') + + r2 = import_html_file(str(filepath)) + result.rows_inserted = r2.rows_inserted + result.rows_skipped = r2.rows_skipped + 
result.error = r2.error + + with get_db() as conn: + _log_done(conn, filename, r2.rows_inserted, r2.rows_skipped, r2.error) + + return result diff --git a/app/ingestion/parser.py b/app/ingestion/parser.py new file mode 100644 index 0000000..229695d --- /dev/null +++ b/app/ingestion/parser.py @@ -0,0 +1,618 @@ +""" +CFTC COT Report Parser + +Parses the fixed-width text format used by CFTC for Commitments of Traders +Long Reports. The format uses ':' as group separators within each data line. + +Handles both: +- Weekly HTML files (text wrapped in
 tag)
+- Historical ZIP files (.txt files, same format without HTML wrapper)
+"""
+
+import re
+import zipfile
+from dataclasses import dataclass, field
+from datetime import date
+from pathlib import Path
+from typing import Iterator, Optional
+
+# Regex patterns
+COMMODITY_HEADER_RE = re.compile(r'^(\S.+?)\s{2,}Code-(\d+)\s*$')
+DATE_RE = re.compile(
+    r'(January|February|March|April|May|June|July|August|September|October|November|December)'
+    r'\s+(\d{1,2}),\s+(\d{4})'
+)
+EXCHANGE_ABBR = {
+    'CHICAGO BOARD OF TRADE': 'CBT',
+    'CHICAGO MERCANTILE EXCHANGE': 'CME',
+    'NEW YORK MERCANTILE EXCHANGE': 'NYMEX',
+    'COMMODITY EXCHANGE INC': 'COMEX',
+    'COMMODITY EXCHANGE INC.': 'COMEX',
+    'ICE FUTURES U.S.': 'ICE',
+    'ICE FUTURES U.S': 'ICE',
+    'ICE FUTURES EUROPE': 'ICE-EU',
+    'KANSAS CITY BOARD OF TRADE': 'KCBT',
+    'MINNEAPOLIS GRAIN EXCHANGE': 'MGE',
+}
+
+
+def _parse_date(text: str) -> Optional[str]:
+    """Extract ISO date string from text like 'February 17, 2026'."""
+    m = DATE_RE.search(text)
+    if not m:
+        return None
+    month, day, year = m.groups()
+    try:
+        d = date(int(year), list(['January', 'February', 'March', 'April', 'May', 'June',
+                                   'July', 'August', 'September', 'October', 'November',
+                                   'December']).index(month) + 1, int(day))
+        return d.isoformat()
+    except (ValueError, IndexError):
+        return None
+
+
+def _nums(s: str, as_float: bool = False) -> list:
+    """Parse whitespace-separated numbers from a string, stripping commas."""
+    clean = s.replace(',', '').strip()
+    if not clean:
+        return []
+    result = []
+    for tok in clean.split():
+        try:
+            result.append(float(tok) if as_float else int(float(tok)))
+        except ValueError:
+            pass
+    return result
+
+
+def _parse_position_line(line: str, as_float: bool = False) -> tuple[str, list]:
+    """
+    Parse a data line like:
+      'All  :   544,127:   117,677    175,249    205,702    184,989    124,796    508,367    505,746:    35,760     38,381'
+
+    Returns (row_label, flat_list_of_values) where values are:
+      [open_interest, noncomm_long, noncomm_short, spreading,
+       comm_long, comm_short, total_long, total_short,
+       nonrept_long, nonrept_short]   -- 10 values total
+    """
+    parts = line.split(':')
+    label = parts[0].strip()
+    values = []
+    for part in parts[1:]:
+        values.extend(_nums(part, as_float=as_float))
+    return label, values
+
+
+def _parse_trader_line(line: str) -> tuple[str, list]:
+    """
+    Parse a traders line like:
+      'All  :       375:       122        119        146        105        104        309        296:'
+    Returns (label, [total, noncomm_long, noncomm_short, spread, comm_long, comm_short, total_long, total_short])
+    """
+    parts = line.split(':')
+    label = parts[0].strip()
+    values = []
+    for part in parts[1:]:
+        values.extend(_nums(part))
+    return label, values
+
+
+def _parse_concentration_line(line: str) -> tuple[str, list]:
+    """
+    Parse a concentration line like:
+      'All  :                 12.5       11.6       21.6       20.1        9.2        7.2       15.3       12.5'
+    Returns (label, [gross_long_4, gross_short_4, gross_long_8, gross_short_8,
+                     net_long_4, net_short_4, net_long_8, net_short_8])
+    """
+    # Only one colon (after label) -- but some lines may have more from header artefacts
+    idx = line.index(':')
+    label = line[:idx].strip()
+    values = _nums(line[idx + 1:], as_float=True)
+    return label, values
+
+
@dataclass
class PositionRow:
    """Contract-count positions for one row type ('All', 'Old' or 'Other')."""
    open_interest: Optional[int] = None
    noncomm_long: Optional[int] = None
    noncomm_short: Optional[int] = None
    noncomm_spreading: Optional[int] = None
    comm_long: Optional[int] = None
    comm_short: Optional[int] = None
    total_long: Optional[int] = None
    total_short: Optional[int] = None
    nonrept_long: Optional[int] = None
    nonrept_short: Optional[int] = None


@dataclass
class ChangesRow:
    """Week-over-week changes in commitments; the parser stores a single
    ChangesRow per block (the report prints one changes table)."""
    chg_open_interest: Optional[int] = None
    chg_noncomm_long: Optional[int] = None
    chg_noncomm_short: Optional[int] = None
    chg_noncomm_spreading: Optional[int] = None
    chg_comm_long: Optional[int] = None
    chg_comm_short: Optional[int] = None
    chg_total_long: Optional[int] = None
    chg_total_short: Optional[int] = None
    chg_nonrept_long: Optional[int] = None
    chg_nonrept_short: Optional[int] = None


@dataclass
class PctRow:
    """Positions expressed as percent of open interest for one row type."""
    pct_open_interest: Optional[float] = None
    pct_noncomm_long: Optional[float] = None
    pct_noncomm_short: Optional[float] = None
    pct_noncomm_spreading: Optional[float] = None
    pct_comm_long: Optional[float] = None
    pct_comm_short: Optional[float] = None
    pct_total_long: Optional[float] = None
    pct_total_short: Optional[float] = None
    pct_nonrept_long: Optional[float] = None
    pct_nonrept_short: Optional[float] = None


@dataclass
class TraderRow:
    """Number of traders in each category for one row type."""
    traders_total: Optional[int] = None
    traders_noncomm_long: Optional[int] = None
    traders_noncomm_short: Optional[int] = None
    traders_noncomm_spread: Optional[int] = None
    traders_comm_long: Optional[int] = None
    traders_comm_short: Optional[int] = None
    traders_total_long: Optional[int] = None
    traders_total_short: Optional[int] = None


@dataclass
class ConcentrationRow:
    """4- and 8-trader concentration figures (percent of open interest held,
    gross and net) from the 'Percent of Open Interest Held by...' table."""
    conc_gross_long_4: Optional[float] = None
    conc_gross_short_4: Optional[float] = None
    conc_gross_long_8: Optional[float] = None
    conc_gross_short_8: Optional[float] = None
    conc_net_long_4: Optional[float] = None
    conc_net_short_4: Optional[float] = None
    conc_net_long_8: Optional[float] = None
    conc_net_short_8: Optional[float] = None


@dataclass
class CommodityBlock:
    """One parsed commodity section of a COT report: identifying metadata
    plus the per-row-type tables collected by _parse_block."""
    cftc_code: str
    name: str
    exchange: str
    exchange_abbr: str
    contract_unit: str
    report_date: str                 # ISO date of this report
    prev_report_date: Optional[str]  # ISO date from the 'Changes in Commitments from' line

    positions: dict = field(default_factory=dict)      # row_type -> PositionRow
    changes: Optional[ChangesRow] = None
    percentages: dict = field(default_factory=dict)    # row_type -> PctRow
    traders: dict = field(default_factory=dict)        # row_type -> TraderRow
    concentration: dict = field(default_factory=dict)  # row_type -> ConcentrationRow
+
+
+def _assign_position_values(values: list, as_float: bool = False) -> dict:
+    """Map a 10-value list to position field names."""
+    keys = ['open_interest', 'noncomm_long', 'noncomm_short', 'noncomm_spreading',
+            'comm_long', 'comm_short', 'total_long', 'total_short',
+            'nonrept_long', 'nonrept_short']
+    return {k: values[i] if i < len(values) else None for i, k in enumerate(keys)}
+
+
def _parse_block(lines: list[str]) -> Optional[CommodityBlock]:
    """Parse a single commodity block into a CommodityBlock.

    ``lines[0]`` must match the commodity header pattern and ``lines[1]``
    must contain the report date; the remaining lines are scanned with a
    small state machine keyed on the report's section headings.

    Returns None for malformed blocks (no header, no report date, or no
    positions table).
    """
    if not lines:
        return None

    # --- Header line (line 0): NAME - EXCHANGE ... Code-XXXXXX ---
    # (the dead `header = lines[0].strip()` local was removed)
    m = COMMODITY_HEADER_RE.match(lines[0].rstrip())
    if not m:
        return None

    full_name = m.group(1).strip()
    cftc_code = m.group(2)

    # Split "NAME - EXCHANGE" on first " - "
    if ' - ' in full_name:
        name, exchange = full_name.split(' - ', 1)
    else:
        name, exchange = full_name, ''
    name = name.strip()
    exchange = exchange.strip()
    # Unknown exchanges fall back to a crude first-6-characters abbreviation.
    exchange_abbr = EXCHANGE_ABBR.get(exchange.upper(), exchange[:6].upper().replace(' ', ''))

    # --- Report date line (line 1) ---
    report_date = None
    if len(lines) > 1:
        report_date = _parse_date(lines[1])
    if not report_date:
        return None

    contract_unit = ''
    prev_report_date = None

    positions: dict = {}
    changes: Optional[ChangesRow] = None
    percentages: dict = {}
    traders: dict = {}
    concentration: dict = {}

    # State machine: `section` names the report table the next data lines
    # belong to.  (The old `expect_changes` flag was written but never read
    # and has been removed.)
    section = 'POSITIONS'

    for line in lines[2:]:
        stripped = line.strip()

        # Skip pure separator / empty lines
        if not stripped or stripped.startswith('---') or stripped == ':':
            continue

        # Contract unit, e.g. "(CONTRACTS OF 5,000 BUSHELS)"
        if '(CONTRACTS OF' in line:
            m2 = re.search(r'\(CONTRACTS OF[^)]+\)', line)
            if m2:
                contract_unit = m2.group(0)
            continue

        # Section triggers — each heading switches the state machine.
        if 'Changes in Commitments from' in line:
            # This heading also carries the previous report date.
            prev_report_date = _parse_date(line)
            section = 'CHANGES'
            continue
        if 'Percent of Open Interest Represented' in line:
            section = 'PERCENT'
            continue
        if '# Traders' in line or 'Number of Traders in Each Category' in line:
            section = 'TRADERS'
            continue
        if 'Percent of Open Interest Held by' in line:
            section = 'CONCENTRATION'
            continue

        # Skip other header/label-only lines
        if ':' not in line:
            continue

        label_part = line.split(':')[0].strip()

        if section == 'POSITIONS' or section == 'PERCENT':
            if label_part not in ('All', 'Old', 'Other'):
                continue
            if section == 'POSITIONS':
                _, vals = _parse_position_line(line, as_float=False)
                if len(vals) >= 1:
                    d = _assign_position_values(vals)
                    positions[label_part] = PositionRow(**d)
            else:
                _, vals = _parse_position_line(line, as_float=True)
                if len(vals) >= 1:
                    keys = ['pct_open_interest', 'pct_noncomm_long', 'pct_noncomm_short',
                            'pct_noncomm_spreading', 'pct_comm_long', 'pct_comm_short',
                            'pct_total_long', 'pct_total_short', 'pct_nonrept_long', 'pct_nonrept_short']
                    d = {k: vals[i] if i < len(vals) else None for i, k in enumerate(keys)}
                    percentages[label_part] = PctRow(**d)

        elif section == 'CHANGES':
            # The single changes row has a blank label (values only).
            if label_part == '' or label_part == ':':
                _, vals = _parse_position_line(line, as_float=False)
                if len(vals) >= 1:
                    keys = ['chg_open_interest', 'chg_noncomm_long', 'chg_noncomm_short',
                            'chg_noncomm_spreading', 'chg_comm_long', 'chg_comm_short',
                            'chg_total_long', 'chg_total_short', 'chg_nonrept_long', 'chg_nonrept_short']
                    d = {k: vals[i] if i < len(vals) else None for i, k in enumerate(keys)}
                    changes = ChangesRow(**d)
                    # Only one changes row per block; ignore later blank labels.
                    section = 'CHANGES_DONE'

        elif section == 'TRADERS':
            if label_part not in ('All', 'Old', 'Other'):
                continue
            _, vals = _parse_trader_line(line)
            if len(vals) >= 1:
                keys = ['traders_total', 'traders_noncomm_long', 'traders_noncomm_short',
                        'traders_noncomm_spread', 'traders_comm_long', 'traders_comm_short',
                        'traders_total_long', 'traders_total_short']
                d = {k: vals[i] if i < len(vals) else None for i, k in enumerate(keys)}
                traders[label_part] = TraderRow(**d)

        elif section == 'CONCENTRATION':
            if label_part not in ('All', 'Old', 'Other'):
                continue
            _, vals = _parse_concentration_line(line)
            if len(vals) >= 8:
                concentration[label_part] = ConcentrationRow(
                    conc_gross_long_4=vals[0],
                    conc_gross_short_4=vals[1],
                    conc_gross_long_8=vals[2],
                    conc_gross_short_8=vals[3],
                    conc_net_long_4=vals[4],
                    conc_net_short_4=vals[5],
                    conc_net_long_8=vals[6],
                    conc_net_short_8=vals[7],
                )

    # A block with no positions table is not a data block.
    if not positions:
        return None

    return CommodityBlock(
        cftc_code=cftc_code,
        name=name,
        exchange=exchange,
        exchange_abbr=exchange_abbr,
        contract_unit=contract_unit,
        report_date=report_date,
        prev_report_date=prev_report_date,
        positions=positions,
        changes=changes,
        percentages=percentages,
        traders=traders,
        concentration=concentration,
    )
+
+
def parse_text_blocks(text: str) -> Iterator[CommodityBlock]:
    """Split raw fixed-width report text into commodity blocks and parse them.

    A block starts at each line matching the commodity header pattern and
    extends to the line before the next header (or end of text).  Blocks
    that fail to parse are silently skipped.
    """
    current: list[str] = []

    for raw in text.splitlines():
        if COMMODITY_HEADER_RE.match(raw.rstrip()):
            # Flush the block accumulated so far (no-op for the empty
            # prologue before the first header — _parse_block returns None).
            parsed = _parse_block(current)
            if parsed is not None:
                yield parsed
            current = [raw]
        else:
            current.append(raw)

    # Flush the trailing block.
    parsed = _parse_block(current)
    if parsed is not None:
        yield parsed
+
+
+def extract_text_from_html(html: str) -> str:
+    """Extract raw text content from the <pre> block in a CFTC HTML file."""
+    from bs4 import BeautifulSoup
+    soup = BeautifulSoup(html, 'html.parser')
+    pre = soup.find('pre')
+    if pre:
+        return pre.get_text()
+    # Fallback: strip HTML tags with a naive regex (no entity decoding).
+    return re.sub(r'<[^>]+>', '', html)
+
+
+def parse_html_file(path: str) -> Iterator[CommodityBlock]:
+    """Parse a weekly HTML file downloaded from CFTC.
+
+    Reads the file as latin-1 (never raises on arbitrary bytes;
+    presumably matches the CFTC report encoding — confirm).
+    """
+    content = Path(path).read_text(encoding='latin-1')
+    text = extract_text_from_html(content)
+    yield from parse_text_blocks(text)
+
+
+def parse_zip_file(zip_path: str) -> Iterator[CommodityBlock]:
+    """
+    Parse a historical CFTC ZIP archive.
+    Annual/historical ZIPs contain a CSV file ('annualof.txt' or similar).
+    Falls back to fixed-width text parsing if no CSV header detected.
+    """
+    with zipfile.ZipFile(zip_path) as zf:
+        # Only .txt members are considered; other archive members are ignored.
+        txt_files = [n for n in zf.namelist() if n.lower().endswith('.txt')]
+        for fname in txt_files:
+            with zf.open(fname) as f:
+                text = f.read().decode('latin-1')
+            # Detect CSV by checking for quoted header on first line
+            # (annual CSVs begin with the quoted 'Market...' column name).
+            if text.lstrip().startswith('"Market'):
+                yield from parse_csv_text(text)
+            else:
+                yield from parse_text_blocks(text)
+
+
+# ── CSV format (historical annual ZIPs) ────────────────────────────────────
+
+# Map CSV column name suffixes to our field names.
+# Keys are the column-name prefixes WITHOUT the trailing ' (All)' /
+# ' (Old)' / ' (Other)' group suffix, which is appended at lookup time.
+_POS_FIELDS = {
+    'Open Interest':                       'open_interest',
+    'Noncommercial Positions-Long':        'noncomm_long',
+    'Noncommercial Positions-Short':       'noncomm_short',
+    'Noncommercial Positions-Spreading':   'noncomm_spreading',
+    'Commercial Positions-Long':           'comm_long',
+    'Commercial Positions-Short':          'comm_short',
+    'Total Reportable Positions-Long':     'total_long',
+    'Total Reportable Positions-Short':    'total_short',
+    'Nonreportable Positions-Long':        'nonrept_long',
+    'Nonreportable Positions-Short':       'nonrept_short',
+}
+_CHG_FIELDS = {
+    'Change in Open Interest':                'chg_open_interest',
+    'Change in Noncommercial-Long':           'chg_noncomm_long',
+    'Change in Noncommercial-Short':          'chg_noncomm_short',
+    'Change in Noncommercial-Spreading':      'chg_noncomm_spreading',
+    'Change in Commercial-Long':              'chg_comm_long',
+    'Change in Commercial-Short':             'chg_comm_short',
+    'Change in Total Reportable-Long':        'chg_total_long',
+    'Change in Total Reportable-Short':       'chg_total_short',
+    'Change in Nonreportable-Long':           'chg_nonrept_long',
+    'Change in Nonreportable-Short':          'chg_nonrept_short',
+}
+_PCT_FIELDS = {
+    '% of Open Interest (OI)':               'pct_open_interest',
+    '% of OI-Noncommercial-Long':            'pct_noncomm_long',
+    '% of OI-Noncommercial-Short':           'pct_noncomm_short',
+    '% of OI-Noncommercial-Spreading':       'pct_noncomm_spreading',
+    '% of OI-Commercial-Long':               'pct_comm_long',
+    '% of OI-Commercial-Short':              'pct_comm_short',
+    '% of OI-Total Reportable-Long':         'pct_total_long',
+    '% of OI-Total Reportable-Short':        'pct_total_short',
+    '% of OI-Nonreportable-Long':            'pct_nonrept_long',
+    '% of OI-Nonreportable-Short':           'pct_nonrept_short',
+}
+_TRD_FIELDS = {
+    'Traders-Total':              'traders_total',
+    'Traders-Noncommercial-Long': 'traders_noncomm_long',
+    'Traders-Noncommercial-Short':'traders_noncomm_short',
+    'Traders-Noncommercial-Spreading': 'traders_noncomm_spread',
+    'Traders-Commercial-Long':    'traders_comm_long',
+    'Traders-Commercial-Short':   'traders_comm_short',
+    'Traders-Total Reportable-Long':  'traders_total_long',
+    'Traders-Total Reportable-Short': 'traders_total_short',
+}
+# NOTE(review): declared but never populated in this module —
+# concentration columns are matched by normalized lookup instead
+# (see _build_concentration_from_csv). Confirm this dict is still needed.
+_CONC_FIELDS = {}  # populated dynamically — column names are inconsistent
+
+
+def _csv_val(row: dict, key: str, as_float: bool = False):
+    """Get a value from a CSV row by whitespace-stripped EXACT key match.
+
+    Returns None for missing keys, empty values, or unparsable numbers.
+    Integers are parsed via float() first to tolerate values like '1.0'.
+    """
+    # Linear scan so that keys differing only in surrounding padding match.
+    for k, v in row.items():
+        if k.strip() == key.strip():
+            v = v.strip()
+            if not v:
+                return None
+            try:
+                return float(v) if as_float else int(float(v))
+            except ValueError:
+                return None
+    return None
+
+
+def _build_position_row_from_csv(row: dict, suffix: str) -> PositionRow:
+    """Build a PositionRow from columns named '<prefix> (<suffix>)'."""
+    kwargs = {}
+    for prefix, field in _POS_FIELDS.items():
+        col = f'{prefix} ({suffix})'
+        # open_interest has slightly different format for Old/Other
+        # NOTE(review): no alternate lookup is attempted here, so a
+        # differently named Old/Other column would yield None — confirm
+        # against a real annual CSV.
+        kwargs[field] = _csv_val(row, col)
+    return PositionRow(**kwargs)
+
+
+def _build_changes_from_csv(row: dict) -> ChangesRow:
+    """Build the week-over-week ChangesRow; CFTC publishes changes for
+    the 'All' group only."""
+    kwargs = {}
+    for prefix, field in _CHG_FIELDS.items():
+        kwargs[field] = _csv_val(row, f'{prefix} (All)')
+    return ChangesRow(**kwargs)
+
+
+def _build_pct_row_from_csv(row: dict, suffix: str) -> PctRow:
+    kwargs = {}
+    for prefix, field in _PCT_FIELDS.items():
+        # Percent columns have slightly inconsistent naming between All and Old/Other
+        col_all = f'{prefix} (OI) ({suffix})' if '% of Open Interest' in prefix else f'{prefix} ({suffix})'
+        val = _csv_val(row, f'{prefix} ({suffix})', as_float=True)
+        if val is None:
+            # Try alternate form
+            val = _csv_val(row, f'{prefix}(OI) ({suffix})', as_float=True)
+        kwargs[field] = val
+    return PctRow(**kwargs)
+
+
+def _build_trader_row_from_csv(row: dict, suffix: str) -> TraderRow:
+    """Build a TraderRow (trader counts) for one group suffix."""
+    kwargs = {}
+    for prefix, field in _TRD_FIELDS.items():
+        kwargs[field] = _csv_val(row, f'{prefix} ({suffix})')
+    return TraderRow(**kwargs)
+
+
+def _build_concentration_from_csv(row: dict, suffix: str) -> ConcentrationRow:
+    """
+    Concentration columns have inconsistent spacing in CFTC CSVs, e.g.:
+      'Concentration-Gross LT = 4 TDR-Long (All)'
+      'Concentration-Gross LT =4 TDR-Short (All)'
+    Match by normalizing whitespace.
+    """
+    import re as _re
+
+    def _norm(s: str) -> str:
+        return _re.sub(r'\s+', '', s).lower()
+
+    # Build a normalized lookup for this row
+    norm_row = {_norm(k): v for k, v in row.items()}
+    suf = suffix.lower()
+
+    def _get(pattern: str):
+        key = _norm(pattern + f'({suffix})')
+        v = norm_row.get(key, '').strip()
+        if not v:
+            return None
+        try:
+            return float(v)
+        except ValueError:
+            return None
+
+    return ConcentrationRow(
+        conc_gross_long_4=_get('Concentration-Gross LT =4 TDR-Long '),
+        conc_gross_short_4=_get('Concentration-Gross LT =4 TDR-Short '),
+        conc_gross_long_8=_get('Concentration-Gross LT =8 TDR-Long '),
+        conc_gross_short_8=_get('Concentration-Gross LT =8 TDR-Short '),
+        conc_net_long_4=_get('Concentration-Net LT =4 TDR-Long '),
+        conc_net_short_4=_get('Concentration-Net LT =4 TDR-Short '),
+        conc_net_long_8=_get('Concentration-Net LT =8 TDR-Long '),
+        conc_net_short_8=_get('Concentration-Net LT =8 TDR-Short '),
+    )
+
+
+def _csv_row_to_block(row: dict) -> Optional[CommodityBlock]:
+    """Convert one CSV row (= one commodity × one date) to a CommodityBlock."""
+    import csv as _csv
+    full_name = row.get('Market and Exchange Names', '').strip()
+    report_date = row.get('As of Date in Form YYYY-MM-DD', '').strip()
+    cftc_code = row.get('CFTC Contract Market Code', '').strip()
+
+    if not full_name or not report_date or not cftc_code:
+        return None
+
+    if ' - ' in full_name:
+        name, exchange = full_name.split(' - ', 1)
+    else:
+        name, exchange = full_name, ''
+    name = name.strip()
+    exchange = exchange.strip()
+    exchange_abbr = EXCHANGE_ABBR.get(exchange.upper(),
+                                      exchange[:6].upper().replace(' ', ''))
+
+    positions = {}
+    percentages = {}
+    traders = {}
+    concentration = {}
+
+    for suffix, label in [('All', 'All'), ('Old', 'Old'), ('Other', 'Other')]:
+        positions[label] = _build_position_row_from_csv(row, suffix)
+        percentages[label] = _build_pct_row_from_csv(row, suffix)
+        traders[label] = _build_trader_row_from_csv(row, suffix)
+        concentration[label] = _build_concentration_from_csv(row, suffix)
+
+    changes = _build_changes_from_csv(row)
+
+    return CommodityBlock(
+        cftc_code=cftc_code,
+        name=name,
+        exchange=exchange,
+        exchange_abbr=exchange_abbr,
+        contract_unit='',
+        report_date=report_date,
+        prev_report_date=None,
+        positions=positions,
+        changes=changes,
+        percentages=percentages,
+        traders=traders,
+        concentration=concentration,
+    )
+
+
+def parse_csv_text(text: str) -> Iterator[CommodityBlock]:
+    """Parse a CFTC historical CSV file (annualof.txt format).
+
+    NOTE(review): splitlines() would corrupt quoted fields containing
+    embedded newlines; assumed not to occur in CFTC exports — confirm.
+    """
+    import csv as _csv
+    reader = _csv.DictReader(text.splitlines())
+    for row in reader:
+        block = _csv_row_to_block(row)
+        if block:
+            yield block
diff --git a/cftc_cot_analysis_2026-02-17.md b/cftc_cot_analysis_2026-02-17.md
new file mode 100644
index 0000000..9d4d60e
--- /dev/null
+++ b/cftc_cot_analysis_2026-02-17.md
@@ -0,0 +1,115 @@
+# CFTC Commitments of Traders Report Analysis
+**Report Date:** February 17, 2026 | **Exchange:** Chicago Board of Trade (CBT)
+
+---
+
+## Traditional Insights
+
+### Agricultural Commodities
+
+| Commodity | Open Interest | Non-Commercial Net | Commercial Net | Weekly Change OI |
+|-----------|--------------|-------------------|----------------|------------------|
+| Wheat-SRW | 544,127 | **-57,572** (bearish) | +60,193 | -46,415 |
+| Wheat-HRW | 329,255 | -5,427 | +10,156 | +13,330 |
+| Corn | 2,203,979 | **-37,540** (bearish) | +56,855 | +45,769 |
+| Soybeans | 1,281,116 | **+189,133** (very bullish) | -153,840 | +73,916 |
+| Soybean Oil | 864,090 | +36,919 | -47,669 | +22,657 |
+| Soybean Meal | 598,663 | +12,664 | -33,774 | -2,217 |
+
+**Key Observations:**
+- **Soybeans:** Speculators are extremely long (256k long vs 68k short) - the largest bullish positioning in the report. Commercials are heavily hedged short.
+- **Wheat-SRW:** Strong bearish sentiment from speculators (175k short vs 118k long). Open interest dropped 8.5% weekly.
+- **Corn:** Modestly bearish speculator positioning, but massive market (2.2M contracts).
+
+### Treasury Futures
+
+| Instrument | Open Interest | Non-Commercial Net | Commercial Net |
+|------------|--------------|-------------------|----------------|
+| 2Y Note | 4,945,848 | **-1,252,133** (extreme short) | +1,143,988 |
+| 5Y Note | 7,901,856 | **-2,109,670** (extreme short) | +1,976,918 |
+| 10Y Note | 7,386,053 | **-936,190** (extreme short) | +869,999 |
+| Ultra 10Y | 2,598,191 | -100,127 | +238,598 |
+| UST Bond | 2,172,484 | +4,672 | -141,824 |
+| Ultra UST Bond | 2,262,466 | **-271,517** (extreme short) | +251,929 |
+
+**Key Observations:**
+- **Massive short bets on the yield curve:** Non-commercial traders are aggressively short 2Y, 5Y, and Ultra UST Bonds. This signals expectations for higher rates or steepening.
+- **2Y Note:** 39.3% of open interest is speculative shorts - extremely one-sided.
+- **5Y Note:** 33.6% speculative shorts vs only 6.9% longs - historic imbalance.
+
+---
+
+## Innovative Insights
+
+### 1. Crowded Trade Risk Assessment
+
+**High Squeeze Risk Positions:**
+
+| Market | Concentration Score | Risk Level |
+|--------|-------------------|------------|
+| 2Y Note Shorts | 39.3% OI | **EXTREME** |
+| 5Y Note Shorts | 33.6% OI | **EXTREME** |
+| Soybean Longs | 20.0% OI | HIGH |
+| Wheat-SRW Shorts | 32.2% OI | HIGH |
+
+A dovish Fed surprise or weaker economic data could trigger violent short squeezes in Treasury futures.
+
+### 2. Commercial/Speculator Divergence Signal
+
+Using the **Commercial-Speculator Spread** as a contrarian indicator:
+
+```
+Soybeans:  Commercials heavily short while specs long → potential top forming
+Corn:      Commercials net long, specs slightly short → potential bottom
+2Y Note:   Extreme divergence → watch for reversal when specs cover
+```
+
+### 3. Cross-Market Flow Analysis
+
+**The "Risk-Off Rotation" Pattern:**
+- Speculators are betting *against* Treasuries (short rates) but *for* Soybeans
+- This is atypical - usually soybean longs and Treasury shorts don't coincide
+- Suggests: Either inflation hedge (commodities up, rates up) OR a fragmented macro view
+
+### 4. Concentration Risk in Smaller Markets
+
+| Market | Top 4 Traders (Long) | Top 4 Traders (Short) | Manipulation Risk |
+|--------|---------------------|----------------------|-------------------|
+| 3Y ERIS Swap | 100% | 82.1% | **CRITICAL** |
+| 2Y ERIS Swap | 97.9% | 62.9% | **HIGH** |
+| BBG Commodity | 80.3% | 88.1% | **HIGH** |
+| DJ Real Estate | 44.8% | 64.7% | MODERATE |
+
+ERIS SOFR Swap markets are dominated by a handful of players - liquidity events could be severe.
+
+### 5. Open Interest Momentum Signal
+
+**Weekly OI Changes (Conviction Indicator):**
+- **10Y Note:** +487,290 contracts (+7.1%) - massive new positioning
+- **Soybeans:** +73,916 contracts (+6.1%) - bulls adding aggressively
+- **Wheat-SRW:** -46,415 contracts (-8.5%) - traders exiting, trend exhaustion?
+
+### 6. Non-Reportable (Retail) Positioning
+
+| Market | Retail Long % | Retail Short % | Smart Money Alignment |
+|--------|--------------|----------------|----------------------|
+| Corn | 8.5% | 9.3% | Neutral |
+| Soybeans | 4.7% | 7.4% | Retail fading the rally |
+| UST Bond | 12.9% | 6.6% | Retail long, specs neutral |
+| Ultra Bond | 8.8% | 7.9% | Retail long, specs short |
+
+Retail is betting *against* the speculator short in bonds - a contrarian signal.
+
+---
+
+## Summary: Actionable Signals
+
+1. **High conviction trade watch:** Soybean long crowding + commercial hedging = potential reversal setup
+2. **Squeeze alert:** Treasury 2Y/5Y shorts are historically extreme - any Fed pivot triggers violent unwind
+3. **Wheat capitulation:** Large OI decline + bearish positioning = potential washout bottom
+4. **Illiquidity pockets:** ERIS swap markets are one-sided - avoid size positions
+5. **Divergence opportunity:** Corn commercials positioning bullish vs. weak speculator sentiment
+
+---
+
+*Source: CFTC Commitments of Traders Long Report - CBT Combined (February 17, 2026)*
diff --git a/cftc_downloader.py b/cftc_downloader.py
new file mode 100644
index 0000000..bc5efc9
--- /dev/null
+++ b/cftc_downloader.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+"""
+CFTC COT Report Downloader
+
+Downloads the CFTC Commitments of Traders CBT Long Form Combined report
+and stores historical copies locally with dated filenames.
+
+Usage:
+    python cftc_downloader.py
+
+The report is typically published every Friday around 3:30 PM ET.
+"""
+
+import os
+import re
+import sys
+from datetime import datetime
+from pathlib import Path
+
+import requests
+from bs4 import BeautifulSoup
+
+# Configuration
+URL = "https://www.cftc.gov/dea/options/deacbtlof.htm"
+DATA_DIR = Path(__file__).parent / "data"
+LOG_FILE = DATA_DIR / "download_log.txt"
+
+# Headers to avoid 403 errors
+HEADERS = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
+}
+
+
+def extract_report_date(html_content: str) -> str | None:
+    """
+    Extract the report date from the HTML content.
+    Looks for patterns like "February 17, 2026" in the report.
+
+    Returns the date formatted as 'YYYY-MM-DD', or None when no date is
+    found or the matched text is not a valid calendar date (strptime
+    rejects e.g. "February 30").
+
+    NOTE(review): re.search takes the FIRST date anywhere in the page —
+    assumes the report date precedes any other date in the HTML; confirm
+    against the live page layout.
+    """
+    # Pattern matches dates like "February 17, 2026" or "January 5, 2026"
+    pattern = r"(January|February|March|April|May|June|July|August|September|October|November|December)\s+(\d{1,2}),\s+(\d{4})"
+    match = re.search(pattern, html_content)
+
+    if match:
+        month_name, day, year = match.groups()
+        # Convert to date object for formatting
+        date_str = f"{month_name} {day}, {year}"
+        try:
+            date_obj = datetime.strptime(date_str, "%B %d, %Y")
+            return date_obj.strftime("%Y-%m-%d")
+        except ValueError:
+            return None
+    return None
+
+
+def download_report() -> tuple[bool, str]:
+    """
+    Download the CFTC report and save it with a dated filename.
+
+    Side effects: creates DATA_DIR, writes the HTML file, appends to the
+    download log, and (best-effort) imports the file into the database.
+
+    Returns:
+        tuple: (success: bool, message: str)
+
+    Note: returns (False, ...) when the report for the extracted date
+    already exists on disk — callers see "already downloaded" as failure.
+    """
+    # Ensure data directory exists
+    DATA_DIR.mkdir(exist_ok=True)
+
+    # Download the page
+    print(f"Downloading from {URL}...")
+    try:
+        response = requests.get(URL, headers=HEADERS, timeout=30)
+        response.raise_for_status()
+    except requests.RequestException as e:
+        return False, f"Download failed: {e}"
+
+    html_content = response.text
+
+    # Extract the report date
+    report_date = extract_report_date(html_content)
+    if not report_date:
+        return False, "Could not extract report date from content"
+
+    print(f"Report date: {report_date}")
+
+    # Create filename
+    filename = f"{report_date}_deacbtlof.htm"
+    filepath = DATA_DIR / filename
+
+    # Check if already downloaded
+    if filepath.exists():
+        return False, f"Report for {report_date} already exists: {filepath}"
+
+    # Save the file
+    # NOTE(review): saved as UTF-8, while the ingestion parser reads HTML
+    # files as latin-1 — confirm the encoding round-trip is safe for
+    # non-ASCII characters in the report.
+    filepath.write_text(html_content, encoding="utf-8")
+    print(f"Saved to: {filepath}")
+
+    # Log the download
+    log_download(report_date, filepath)
+
+    # Import into database (best-effort; skipped when app is unavailable)
+    try:
+        from app.ingestion.importer import import_html_file
+        result = import_html_file(str(filepath))
+        if result.error:
+            print(f"DB import warning: {result.error}")
+        else:
+            print(f"Imported to DB: {result.rows_inserted} rows inserted")
+    except ImportError:
+        pass  # app package not available, skip DB import
+
+    return True, f"Successfully downloaded report for {report_date}"
+
+
+def log_download(report_date: str, filepath: Path) -> None:
+    """Append a timestamped entry for this download to LOG_FILE."""
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    log_entry = f"{timestamp} | Report date: {report_date} | File: {filepath.name}\n"
+
+    # Append mode: the log accumulates across runs.
+    with open(LOG_FILE, "a", encoding="utf-8") as f:
+        f.write(log_entry)
+
+
+def list_downloads() -> None:
+    """Print all previously downloaded reports (name and size in KB)."""
+    if not DATA_DIR.exists():
+        print("No downloads yet.")
+        return
+
+    # Dated filenames sort chronologically because of the YYYY-MM-DD prefix.
+    files = sorted(DATA_DIR.glob("*_deacbtlof.htm"))
+    if not files:
+        print("No downloaded reports found.")
+        return
+
+    print(f"\nDownloaded reports ({len(files)} files):")
+    print("-" * 40)
+    for f in files:
+        size_kb = f.stat().st_size / 1024
+        print(f"  {f.name} ({size_kb:.1f} KB)")
+
+
+def main():
+    """Main entry point: download the latest report, then list downloads.
+
+    Returns a process exit code: 0 on success, 1 on failure (including
+    the already-downloaded case).
+    """
+    print("CFTC COT Report Downloader")
+    print("=" * 40)
+
+    success, message = download_report()
+    print(f"\n{message}")
+
+    # Show current downloads
+    list_downloads()
+
+    return 0 if success else 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..34f3d9c
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,38 @@
+services:
+  web:
+    # FastAPI app: initializes the SQLite schema, then serves the API.
+    build: .
+    container_name: cot_web
+    ports:
+      - "8000:8000"
+    volumes:
+      - cot_data:/data
+    environment:
+      - DB_PATH=/data/cot.db
+    command: >
+      sh -c "python3 -m app.ingestion.cli init-db &&
+             uvicorn app.api.main:app --host 0.0.0.0 --port 8000"
+    restart: unless-stopped
+    healthcheck:
+      # NOTE(review): requires curl inside the image — confirm the
+      # Dockerfile installs it; otherwise the service never becomes
+      # healthy and `cron` (waiting on service_healthy) never starts.
+      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 10s
+
+  cron:
+    # Scheduled downloader; shares the SQLite volume with `web`.
+    build: .
+    container_name: cot_cron
+    volumes:
+      - cot_data:/data
+      - ./data:/app/data
+    environment:
+      - DB_PATH=/data/cot.db
+    command: /app/scripts/cron_entrypoint.sh
+    restart: unless-stopped
+    depends_on:
+      web:
+        condition: service_healthy
+
+volumes:
+  cot_data:
+    driver: local
diff --git a/frontend/app.js b/frontend/app.js
new file mode 100644
index 0000000..515b493
--- /dev/null
+++ b/frontend/app.js
@@ -0,0 +1,625 @@
+// ── State ──────────────────────────────────────────────────────────────────
+// Single mutable UI state object shared by all views.
+const state = {
+  view: 'detail',          // active view: 'detail' | screener | compare
+  selectedCode: null,      // CFTC code of the market shown in detail view
+  dateRange: '3Y',         // '1Y' | '3Y' | '5Y' | 'Max'
+  rowType: 'All',          // position row type requested from the API
+  metric: 'noncomm_net',   // metric plotted in the detail chart
+  overlayOI: true,         // overlay open-interest bars on the detail chart
+  exchange: '',            // exchange filter for the sidebar
+  compareMarkets: [],      // [{cftc_code, name, exchange_abbr, color}]
+  compareMetric: 'noncomm_net',
+  compareRange: '3Y',
+  allCommodities: [],      // full list from API
+};
+
+// ── API ────────────────────────────────────────────────────────────────────
+// Thin wrapper around fetch() for the backend REST API.
+// All helpers return parsed JSON; non-2xx responses throw.
+const API = {
+  async get(path) {
+    const r = await fetch(path);
+    if (!r.ok) throw new Error(`${r.status} ${r.statusText}`);
+    return r.json();
+  },
+  exchanges: () => API.get('/api/exchanges'),
+  commodities: (exchange) => API.get('/api/commodities' + (exchange ? `?exchange=${exchange}` : '')),
+  // Position history for one market; from/to dates are optional.
+  history: (code, fromDate, toDate, rowType) => {
+    const p = new URLSearchParams({ row_type: rowType });
+    if (fromDate) p.append('from_date', fromDate);
+    if (toDate) p.append('to_date', toDate);
+    return API.get(`/api/positions/${code}/history?${p}`);
+  },
+  extremes: (code) => API.get(`/api/positions/${code}/extremes`),
+  percentile: (code, weeks) => API.get(`/api/analytics/${code}/net-position-percentile?lookback_weeks=${weeks}`),
+  screener: (exchange, direction, lookback) => {
+    const p = new URLSearchParams({ lookback_weeks: lookback, top_n: 200 });
+    if (exchange) p.append('exchange', exchange);
+    if (direction) p.append('direction', direction === 'long' ? 'long' : 'short');
+    return API.get(`/api/analytics/screener?${p}`);
+  },
+  // Multi-market comparison series for one metric.
+  compare: (codes, metric, fromDate, toDate) => {
+    const p = new URLSearchParams({ codes: codes.join(','), metric, row_type: 'All' });
+    if (fromDate) p.append('from_date', fromDate);
+    if (toDate) p.append('to_date', toDate);
+    return API.get(`/api/positions/compare?${p}`);
+  },
+};
+
+// ── Utilities ──────────────────────────────────────────────────────────────
+// Format a numeric value for display: em-dash for null/undefined,
+// 'M'/'K' abbreviation for large magnitudes, locale grouping otherwise.
+function fmt(v) {
+  if (v === null || v === undefined) return '—';
+  if (typeof v === 'number') {
+    if (Math.abs(v) >= 1000000) return (v / 1000000).toFixed(2) + 'M';
+    if (Math.abs(v) >= 1000) return (v / 1000).toFixed(1) + 'K';
+    return v.toLocaleString();
+  }
+  return v;
+}
+
+// Format a percentage with one decimal; em-dash for null/undefined.
+function fmtPct(v) {
+  if (v === null || v === undefined) return '—';
+  return v.toFixed(1) + '%';
+}
+
+// Format a signed change value, prefixing '+' for non-negative numbers.
+function fmtChange(v) {
+  if (v === null || v === undefined) return '—';
+  const s = (v >= 0 ? '+' : '') + fmt(v);
+  return s;
+}
+
+// Convert a UI range label ('1Y'/'3Y'/'5Y'/'Max') into a from-date string
+// (YYYY-MM-DD), or null for 'Max' (no lower bound).
+// NOTE(review): toISOString() is UTC — near-midnight local time can shift
+// the result by one day; harmless for a multi-year lower bound.
+function dateRangeToFrom(range) {
+  if (range === 'Max') return null;
+  const d = new Date();
+  const years = { '1Y': 1, '3Y': 3, '5Y': 5 }[range] || 3;
+  d.setFullYear(d.getFullYear() - years);
+  return d.toISOString().split('T')[0];
+}
+
+const CHART_COLORS = [
+  '#3b82f6', '#22c55e', '#f97316', '#a855f7',
+  '#ec4899', '#14b8a6', '#eab308', '#ef4444',
+];
+
+// ── Charts ─────────────────────────────────────────────────────────────────
+let detailChart = null;
+let compareChart = null;
+
+function destroyChart(c) { if (c) { c.destroy(); } return null; }
+
+// Render the detail chart: the selected metric as a line (left axis),
+// optionally with open interest as translucent bars on a second right
+// axis. Destroys and recreates the Chart.js instance on every call.
+function buildDetailChart(data, metric, overlayOI) {
+  detailChart = destroyChart(detailChart);
+  const canvas = document.getElementById('detailChart');
+  const ctx = canvas.getContext('2d');
+
+  const labels = data.map(d => d.report_date);
+  // Use the human-readable label from the metric <select>, if present.
+  const metricLabel = document.getElementById('metricSelect').selectedOptions[0]?.text || metric;
+
+  // Derived net metrics are computed client-side; others are read directly.
+  let values;
+  if (metric === 'noncomm_net') {
+    values = data.map(d => (d.noncomm_long ?? 0) - (d.noncomm_short ?? 0));
+  } else if (metric === 'comm_net') {
+    values = data.map(d => (d.comm_long ?? 0) - (d.comm_short ?? 0));
+  } else {
+    values = data.map(d => d[metric] ?? null);
+  }
+
+  const datasets = [{
+    type: 'line',
+    label: metricLabel,
+    data: values,
+    borderColor: '#3b82f6',
+    backgroundColor: 'rgba(59,130,246,0.08)',
+    fill: true,
+    tension: 0.2,
+    // Hide point markers on dense series for readability.
+    pointRadius: data.length < 60 ? 3 : 0,
+    pointHoverRadius: 5,
+    borderWidth: 2,
+    yAxisID: 'y',
+    order: 1,
+  }];
+
+  const scales = {
+    x: {
+      ticks: { color: '#6b7280', maxTicksLimit: 8, maxRotation: 0 },
+      grid: { color: '#2d3148' },
+    },
+    y: {
+      position: 'left',
+      ticks: { color: '#6b7280', callback: v => fmt(v) },
+      grid: { color: '#2d3148' },
+    },
+  };
+
+  if (overlayOI) {
+    // Secondary dataset + right-hand axis for open interest.
+    datasets.push({
+      type: 'bar',
+      label: 'Open Interest',
+      data: data.map(d => d.open_interest ?? null),
+      backgroundColor: 'rgba(156,163,175,0.15)',
+      borderColor: 'rgba(156,163,175,0.3)',
+      borderWidth: 1,
+      yAxisID: 'y1',
+      order: 2,
+    });
+    scales.y1 = {
+      position: 'right',
+      ticks: { color: '#4b5563', callback: v => fmt(v) },
+      grid: { drawOnChartArea: false },
+    };
+  }
+
+  // Base type 'bar' with per-dataset type overrides (Chart.js mixed chart).
+  detailChart = new Chart(ctx, {
+    type: 'bar',
+    data: { labels, datasets },
+    options: {
+      responsive: true,
+      maintainAspectRatio: false,
+      interaction: { mode: 'index', intersect: false },
+      plugins: {
+        legend: { labels: { color: '#9ca3af', boxWidth: 12 } },
+        tooltip: {
+          backgroundColor: '#1a1d27',
+          borderColor: '#2d3148',
+          borderWidth: 1,
+          titleColor: '#e2e8f0',
+          bodyColor: '#9ca3af',
+          callbacks: {
+            label: (ctx) => ` ${ctx.dataset.label}: ${fmt(ctx.parsed.y)}`,
+          },
+        },
+      },
+      scales,
+    },
+  });
+}
+
+// Render the multi-market comparison chart: one line per selected market,
+// aligned on the union of all report dates (gaps stay as gaps).
+function buildCompareChart(seriesData, commodities, metric) {
+  compareChart = destroyChart(compareChart);
+  const canvas = document.getElementById('compareChart');
+  const ctx = canvas.getContext('2d');
+
+  // Union of all report dates across markets, sorted (ISO dates sort
+  // correctly as strings).
+  const allDates = [...new Set(
+    Object.values(seriesData).flatMap(pts => pts.map(p => p.report_date))
+  )].sort();
+
+  const datasets = state.compareMarkets.map((m, i) => {
+    const pts = seriesData[m.cftc_code] || [];
+    // Index the series by date so each market aligns to the shared axis.
+    const byDate = Object.fromEntries(pts.map(p => [p.report_date, p.value]));
+    return {
+      label: m.name,
+      data: allDates.map(d => byDate[d] ?? null),
+      borderColor: m.color,
+      backgroundColor: 'transparent',
+      tension: 0.2,
+      pointRadius: allDates.length < 60 ? 3 : 0,
+      pointHoverRadius: 5,
+      borderWidth: 2,
+      spanGaps: false,
+    };
+  });
+
+  compareChart = new Chart(ctx, {
+    type: 'line',
+    data: { labels: allDates, datasets },
+    options: {
+      responsive: true,
+      maintainAspectRatio: false,
+      interaction: { mode: 'index', intersect: false },
+      plugins: {
+        legend: { labels: { color: '#9ca3af', boxWidth: 12 } },
+        tooltip: {
+          backgroundColor: '#1a1d27',
+          borderColor: '#2d3148',
+          borderWidth: 1,
+          titleColor: '#e2e8f0',
+          bodyColor: '#9ca3af',
+          callbacks: {
+            label: (ctx) => ` ${ctx.dataset.label}: ${fmt(ctx.parsed.y)}`,
+          },
+        },
+      },
+      scales: {
+        x: { ticks: { color: '#6b7280', maxTicksLimit: 8, maxRotation: 0 }, grid: { color: '#2d3148' } },
+        y: { ticks: { color: '#6b7280', callback: v => fmt(v) }, grid: { color: '#2d3148' } },
+      },
+    },
+  });
+}
+
+// ── Market sidebar ─────────────────────────────────────────────────────────
+// Rebuild the sidebar market tree: markets grouped by exchange, with an
+// optional case-insensitive filter on market name or exchange code.
+function buildMarketTree(commodities, filter = '') {
+  const tree = document.getElementById('market-tree');
+  const groups = {};
+  const q = filter.toLowerCase();
+
+  for (const c of commodities) {
+    const matches = !q || c.name.toLowerCase().includes(q) || c.exchange_abbr.toLowerCase().includes(q);
+    if (!matches) continue;
+    if (!groups[c.exchange_abbr]) groups[c.exchange_abbr] = [];
+    groups[c.exchange_abbr].push(c);
+  }
+
+  tree.innerHTML = '';
+  for (const [exch, list] of Object.entries(groups)) {
+    const grp = document.createElement('div');
+    grp.className = 'exchange-group';
+
+    const lbl = document.createElement('div');
+    lbl.className = 'exchange-label';
+    lbl.textContent = exch;
+    // `ul` is declared below; safe because the handler only runs on click,
+    // long after this function body has finished initializing it.
+    lbl.addEventListener('click', () => {
+      lbl.classList.toggle('collapsed');
+      ul.classList.toggle('hidden');
+    });
+
+    const ul = document.createElement('div');
+    ul.className = 'market-list';
+
+    for (const c of list) {
+      const item = document.createElement('div');
+      item.className = 'market-item' + (c.cftc_code === state.selectedCode ? ' active' : '');
+      item.textContent = c.name;
+      item.title = c.name;
+      item.dataset.code = c.cftc_code;
+      item.addEventListener('click', () => selectMarket(c.cftc_code));
+      ul.appendChild(item);
+    }
+
+    grp.appendChild(lbl);
+    grp.appendChild(ul);
+    tree.appendChild(grp);
+  }
+}
+
+// ── Detail view logic ──────────────────────────────────────────────────────
+// Select a market in the detail view: update sidebar highlighting, reveal
+// the detail pane, fill in metadata, and load the chart.
+async function selectMarket(code) {
+  state.selectedCode = code;
+  document.querySelectorAll('.market-item').forEach(el => {
+    el.classList.toggle('active', el.dataset.code === code);
+  });
+  document.getElementById('detail-placeholder').style.display = 'none';
+  document.getElementById('detail-content').style.display = 'flex';
+  document.getElementById('detail-content').style.flexDirection = 'column';
+  document.getElementById('detail-content').style.gap = '12px';
+
+  // Metadata comes from the cached commodity list, not a fresh API call.
+  const meta = state.allCommodities.find(c => c.cftc_code === code);
+  if (meta) {
+    document.getElementById('detail-title').textContent = meta.name;
+    document.getElementById('detail-exchange').textContent = meta.exchange_abbr;
+    document.getElementById('detail-unit').textContent = meta.contract_unit || '';
+  }
+
+  await refreshDetailChart();
+}
+
async function refreshDetailChart() {
  // Reload the detail chart and the stats bar for the currently selected
  // market. No-op when no market has been selected yet.
  if (!state.selectedCode) return;
  const fromDate = dateRangeToFrom(state.dateRange);

  try {
    // Percentile is a nice-to-have: swallow its failure so the chart still renders.
    const [hist, pctileData] = await Promise.all([
      API.history(state.selectedCode, fromDate, null, state.rowType),
      API.percentile(state.selectedCode, 156).catch(() => null),
    ]);

    const data = hist.data;
    if (!data.length) return;

    buildDetailChart(data, state.metric, state.overlayOI);

    // ── Stats bar ──
    const latest = data[data.length - 1];
    // Derive the plotted metric per row; the two "net" metrics are computed
    // client-side from long/short legs.
    const metricValues = data.map(d => {
      if (state.metric === 'noncomm_net') return (d.noncomm_long ?? 0) - (d.noncomm_short ?? 0);
      if (state.metric === 'comm_net') return (d.comm_long ?? 0) - (d.comm_short ?? 0);
      return d[state.metric];
    }).filter(v => v != null); // drop null AND undefined (e.g. unknown metric key)

    // Guard: Math.max(...[]) would yield -Infinity on an all-null series.
    if (!metricValues.length) return;

    const currentVal = metricValues[metricValues.length - 1];
    const maxVal = Math.max(...metricValues);
    const minVal = Math.min(...metricValues);

    const statCurrent = document.getElementById('statCurrent');
    statCurrent.textContent = fmt(currentVal);
    statCurrent.className = 'stat-value' + (currentVal > 0 ? ' positive' : currentVal < 0 ? ' negative' : '');

    document.getElementById('statMax').textContent = fmt(maxVal);
    document.getElementById('statMin').textContent = fmt(minVal);

    // Percentile may legitimately be missing (endpoint failure, thin history).
    if (pctileData?.percentile != null) {
      document.getElementById('statPctile').textContent = fmtPct(pctileData.percentile);
    } else {
      document.getElementById('statPctile').textContent = '—';
    }

    // Week-over-week change in the non-commercial net position.
    const chgLong = latest.chg_noncomm_long;
    const chgShort = latest.chg_noncomm_short;
    const changeEl = document.getElementById('statChange');
    if (chgLong !== null && chgShort !== null) {
      const netChg = chgLong - chgShort;
      changeEl.textContent = fmtChange(netChg) + ' net';
      changeEl.className = 'stat-value' + (netChg > 0 ? ' positive' : netChg < 0 ? ' negative' : '');
    } else {
      // Reset so the previous market's change does not linger as stale data.
      changeEl.textContent = '—';
      changeEl.className = 'stat-value';
    }
  } catch (e) {
    console.error('Failed to load chart data:', e);
  }
}
+
+// ── Screener view ──────────────────────────────────────────────────────────
// ── Screener view ──────────────────────────────────────────────────────────
// Fetch the screener table for the current exchange/direction/lookback
// filters and render one clickable row per market.
async function loadScreener() {
  const exchange = document.getElementById('screenerExchange').value;
  const dirSel = document.getElementById('screenerDirection').value;
  const lookback = document.getElementById('screenerLookback').value;

  // Map UI direction to API direction ('' = no server-side filter).
  let direction = '';
  if (dirSel === 'long') direction = 'long';
  else if (dirSel === 'short') direction = 'short';

  const tbody = document.getElementById('screenerBody');
  tbody.innerHTML = 'Loading...';

  try {
    let rows = await API.screener(exchange, direction, lookback);

    // Client-side direction filter (the API `direction` param filters by >=50 or <50,
    // but UI wants >=70 and <=30 for "extreme").
    // Null percentile ranks must be excluded explicitly: in JS `null <= 30`
    // coerces null to 0 and is TRUE, which would wrongly admit unranked
    // markets to the "short" bucket.
    if (dirSel === 'long') rows = rows.filter(r => r.pct_rank != null && r.pct_rank >= 70);
    else if (dirSel === 'short') rows = rows.filter(r => r.pct_rank != null && r.pct_rank <= 30);

    tbody.innerHTML = '';
    for (const row of rows) {
      const pct = row.pct_rank;
      // Same null coercion trap as above: a null pct must be 'neutral', not
      // 'extreme-short' (null <= 30 is true).
      const pctClass = pct == null ? 'neutral' : pct >= 70 ? 'extreme-long' : pct <= 30 ? 'extreme-short' : 'neutral';
      const netChg = (row.chg_noncomm_long ?? 0) - (row.chg_noncomm_short ?? 0);

      // NOTE(review): this template looks stripped of its HTML tags (no <td>
      // elements survive in this view) — confirm the cell markup against the
      // repository before relying on it.
      const tr = document.createElement('tr');
      tr.innerHTML = `
        ${row.commodity}
        ${row.exchange}
        ${fmt(row.noncomm_net)}
        ${fmt(row.open_interest)}
        
          
          ${pct != null ? fmtPct(pct) : '—'}
        
        ${fmtChange(row.chg_noncomm_long)}
        ${fmtChange(row.chg_noncomm_short)}
      `;
      // Clicking a row jumps to the detail view for that market.
      tr.addEventListener('click', () => {
        switchView('detail');
        selectMarket(row.cftc_code);
      });
      tbody.appendChild(tr);
    }

    if (!rows.length) {
      tbody.innerHTML = 'No markets match the current filters.';
    }
  } catch (e) {
    tbody.innerHTML = `Error: ${e.message}`;
  }
}
+
+// ── Compare view ───────────────────────────────────────────────────────────
// ── Compare view ───────────────────────────────────────────────────────────
// Render one removable pill/tag per market currently in the compare set,
// colored to match that market's chart series.
function renderCompareTags() {
  const container = document.getElementById('compareTags');
  container.innerHTML = '';
  state.compareMarkets.forEach((m, i) => {
    const tag = document.createElement('div');
    tag.className = 'compare-tag';
    tag.style.background = m.color;  // tag background mirrors the series color
    // NOTE(review): this markup appears truncated — it contains no <button>,
    // yet the very next line queries one (querySelector('button') would return
    // null on this string as written). The original file presumably rendered a
    // remove button inside the tag; confirm against the repository.
    tag.innerHTML = `${m.name} `;
    tag.querySelector('button').addEventListener('click', (e) => {
      e.stopPropagation();  // keep the click from bubbling to the tag itself
      removeCompareMarket(m.cftc_code);
    });
    container.appendChild(tag);
  });
}
+
// Add a commodity to the compare set (deduplicated, capped at 8 markets),
// assign it the next palette color, then refresh the tags and the chart.
function addCompareMarket(commodity) {
  const alreadyAdded = state.compareMarkets.some(m => m.cftc_code === commodity.cftc_code);
  if (alreadyAdded || state.compareMarkets.length >= 8) return;

  const color = CHART_COLORS[state.compareMarkets.length % CHART_COLORS.length];
  state.compareMarkets.push({ ...commodity, color });

  renderCompareTags();
  loadCompareChart();
}
+
// Drop a market from the compare set, re-derive the remaining markets' colors
// so the palette stays contiguous, then refresh the tags and the chart.
function removeCompareMarket(code) {
  const remaining = state.compareMarkets.filter(m => m.cftc_code !== code);
  remaining.forEach((market, idx) => {
    market.color = CHART_COLORS[idx % CHART_COLORS.length];
  });
  state.compareMarkets = remaining;
  renderCompareTags();
  loadCompareChart();
}
+
// (Re)draw the multi-market comparison chart, or fall back to the placeholder
// when the compare set is empty.
async function loadCompareChart() {
  const placeholder = document.getElementById('comparePlaceholder');
  const canvas = document.getElementById('compareChart');

  if (state.compareMarkets.length === 0) {
    // Nothing to compare: show the placeholder and release the old chart.
    placeholder.style.display = 'flex';
    canvas.style.display = 'none';
    compareChart = destroyChart(compareChart);
    return;
  }

  placeholder.style.display = 'none';
  canvas.style.display = 'block';

  const codes = state.compareMarkets.map(m => m.cftc_code);
  const fromDate = dateRangeToFrom(state.compareRange);

  try {
    const resp = await API.compare(codes, state.compareMetric, fromDate, null);
    buildCompareChart(resp.series, resp.commodities, state.compareMetric);
  } catch (e) {
    console.error('Compare chart error:', e);
  }
}
+
+// ── Compare autocomplete ───────────────────────────────────────────────────
// ── Compare autocomplete ───────────────────────────────────────────────────
// Wire up the compare-view search box: filter commodities as the user types,
// add a clicked suggestion to the compare set, and dismiss the dropdown when
// clicking anywhere outside the widget.
function setupCompareSearch() {
  const input = document.getElementById('compareSearch');
  const dropdown = document.getElementById('compareDropdown');

  const hideDropdown = () => { dropdown.style.display = 'none'; };

  input.addEventListener('input', () => {
    const query = input.value.trim().toLowerCase();
    if (!query) { hideDropdown(); return; }

    // Match on market name or exchange abbreviation; cap at 10 suggestions.
    const matches = state.allCommodities
      .filter(c => c.name.toLowerCase().includes(query) || c.exchange_abbr.toLowerCase().includes(query))
      .slice(0, 10);

    dropdown.innerHTML = '';
    if (!matches.length) { hideDropdown(); return; }

    for (const c of matches) {
      const item = document.createElement('div');
      item.className = 'autocomplete-item';
      // NOTE(review): this markup looks stripped of tags (the exchange label
      // presumably had its own styled element) — confirm against the repo.
      item.innerHTML = `${c.name} ${c.exchange_abbr}`;
      item.addEventListener('click', () => {
        addCompareMarket(c);
        input.value = '';
        hideDropdown();
      });
      dropdown.appendChild(item);
    }
    dropdown.style.display = 'block';
  });

  // Close the dropdown on any click outside the search input or the dropdown.
  document.addEventListener('click', (e) => {
    if (!input.contains(e.target) && !dropdown.contains(e.target)) {
      hideDropdown();
    }
  });
}
+
+// ── View switching ─────────────────────────────────────────────────────────
// ── View switching ─────────────────────────────────────────────────────────
// Show one top-level view ('detail' | 'screener' | 'compare'), hide the rest,
// and sync the header tab highlight.
function switchView(view) {
  state.view = view;
  document.querySelectorAll('.view').forEach(el => { el.style.display = 'none'; });
  document.querySelectorAll('.tab-btn').forEach(btn => {
    btn.classList.toggle('active', btn.dataset.view === view);
  });
  // Every view is a flex container (the original ternary produced 'flex' on
  // both branches); only the axis differs — detail lays sidebar + main in a row.
  const target = document.getElementById(`view-${view}`);
  target.style.display = 'flex';
  target.style.flexDirection = view === 'detail' ? 'row' : 'column';

  // Screener data is fetched lazily on entry; compare re-renders its tag list.
  if (view === 'screener') loadScreener();
  if (view === 'compare') renderCompareTags();
}
+
+// ── Populate exchange dropdowns ────────────────────────────────────────────
// ── Populate exchange dropdowns ────────────────────────────────────────────
// Fill both exchange <select> elements (sidebar filter and screener filter)
// with one option per exchange, labeled with its market count.
// Declared async so callers may await it, though it performs no async work.
async function populateExchangeDropdowns(exchanges) {
  for (const selectId of ['exchangeFilter', 'screenerExchange']) {
    const select = document.getElementById(selectId);
    for (const exchange of exchanges) {
      const option = document.createElement('option');
      option.value = exchange.exchange_abbr;
      option.textContent = `${exchange.exchange_abbr} (${exchange.commodity_count})`;
      select.appendChild(option);
    }
  }
}
+
+// ── Init ───────────────────────────────────────────────────────────────────
// ── Init ───────────────────────────────────────────────────────────────────
// Bootstrap the app: fetch reference data, build the sidebar market tree, and
// auto-select the first market so the detail view is not empty on load.
async function init() {
  try {
    const [exchanges, commodities] = await Promise.all([
      API.exchanges(),
      API.commodities(),
    ]);

    state.allCommodities = commodities;
    await populateExchangeDropdowns(exchanges);
    buildMarketTree(commodities);

    const firstCommodity = commodities[0];
    if (firstCommodity) {
      selectMarket(firstCommodity.cftc_code);
    }
  } catch (e) {
    console.error('Init failed:', e);
  }
}
+
+// ── Event listeners ────────────────────────────────────────────────────────
// ── Event listeners ────────────────────────────────────────────────────────
// Wire up every static UI handler once the DOM is ready, then kick off init().
document.addEventListener('DOMContentLoaded', () => {
  // Header tabs switch between the three top-level views.
  document.querySelectorAll('.tab-btn').forEach(btn => {
    btn.addEventListener('click', () => switchView(btn.dataset.view));
  });

  // Commodities visible in the sidebar under the current exchange filter.
  const filteredCommodities = () =>
    state.allCommodities.filter(c => !state.exchange || c.exchange_abbr === state.exchange);

  // Exchange filter (sidebar): rebuild the market tree on change.
  document.getElementById('exchangeFilter').addEventListener('change', async (e) => {
    state.exchange = e.target.value;
    buildMarketTree(filteredCommodities());
  });

  // Market search: same exchange filter, plus the free-text query.
  document.getElementById('marketSearch').addEventListener('input', (e) => {
    buildMarketTree(filteredCommodities(), e.target.value);
  });

  // Exclusive button groups: highlight the clicked button, store its dataset
  // value into state, then refresh the detail chart.
  const wireButtonGroup = (selector, apply) => {
    document.querySelectorAll(selector).forEach(btn => {
      btn.addEventListener('click', () => {
        document.querySelectorAll(selector).forEach(b => b.classList.remove('active'));
        btn.classList.add('active');
        apply(btn);
        refreshDetailChart();
      });
    });
  };

  // Date range buttons.
  wireButtonGroup('.range-btn', btn => { state.dateRange = btn.dataset.range; });
  // Row type buttons (All / Old / Other).
  wireButtonGroup('.rt-btn', btn => { state.rowType = btn.dataset.rt; });

  // Metric select.
  document.getElementById('metricSelect').addEventListener('change', (e) => {
    state.metric = e.target.value;
    refreshDetailChart();
  });

  // Open-interest overlay checkbox.
  document.getElementById('overlayOI').addEventListener('change', (e) => {
    state.overlayOI = e.target.checked;
    refreshDetailChart();
  });

  // Screener: explicit refresh plus any filter change re-runs the query.
  document.getElementById('screenerRefresh').addEventListener('click', loadScreener);
  for (const id of ['screenerDirection', 'screenerLookback', 'screenerExchange']) {
    document.getElementById(id).addEventListener('change', loadScreener);
  }

  // Compare controls: metric and date range both redraw the compare chart.
  document.getElementById('compareMetric').addEventListener('change', (e) => {
    state.compareMetric = e.target.value;
    loadCompareChart();
  });
  document.getElementById('compareRange').addEventListener('change', (e) => {
    state.compareRange = e.target.value;
    loadCompareChart();
  });

  setupCompareSearch();
  init();
});
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000..0dc8fcb
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,188 @@
+
+
+
+
+
+CFTC COT Explorer
+
+
+
+
+
+
+ + +
+ +
+ +
+ +
+ +
+
+

Select a market from the left panel to begin exploring.

+
+ +
+
+ + + + + + +
+ + + + diff --git a/frontend/style.css b/frontend/style.css new file mode 100644 index 0000000..01a9f02 --- /dev/null +++ b/frontend/style.css @@ -0,0 +1,517 @@ +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } + +:root { + --bg: #0f1117; + --surface: #1a1d27; + --surface2: #222536; + --border: #2d3148; + --text: #e2e8f0; + --text-muted: #6b7280; + --accent: #3b82f6; + --accent-dim: #1d4ed8; + --green: #22c55e; + --red: #ef4444; + --orange: #f97316; + --yellow: #eab308; + --radius: 6px; + --font: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif; +} + +body { + background: var(--bg); + color: var(--text); + font-family: var(--font); + font-size: 14px; + line-height: 1.5; + height: 100vh; + display: flex; + flex-direction: column; + overflow: hidden; +} + +/* ── Header ────────────────────────────────────────────────── */ +header { + display: flex; + align-items: center; + justify-content: space-between; + padding: 0 16px; + height: 48px; + background: var(--surface); + border-bottom: 1px solid var(--border); + flex-shrink: 0; +} + +.header-left { display: flex; align-items: center; gap: 12px; } + +.logo { + font-weight: 700; + font-size: 15px; + color: var(--accent); + letter-spacing: 0.5px; +} + +nav { display: flex; gap: 4px; } + +.tab-btn { + background: none; + border: none; + color: var(--text-muted); + padding: 6px 14px; + border-radius: var(--radius); + cursor: pointer; + font-size: 13px; + transition: all 0.15s; +} + +.tab-btn:hover { color: var(--text); background: var(--surface2); } +.tab-btn.active { color: var(--accent); background: var(--surface2); font-weight: 600; } + +/* ── Main layout ───────────────────────────────────────────── */ +main { flex: 1; overflow: hidden; position: relative; } + +.view { height: 100%; } + +/* ── Detail view ───────────────────────────────────────────── */ +#view-detail { + display: flex; + height: 100%; +} + +#market-sidebar { + width: 220px; + flex-shrink: 0; + border-right: 1px solid 
var(--border); + display: flex; + flex-direction: column; + overflow: hidden; +} + +#marketSearch { + margin: 10px 8px 6px; + padding: 6px 10px; + background: var(--surface2); + border: 1px solid var(--border); + border-radius: var(--radius); + color: var(--text); + font-size: 13px; + outline: none; + width: calc(100% - 16px); +} + +#marketSearch:focus { border-color: var(--accent); } + +#market-tree { + flex: 1; + overflow-y: auto; + padding: 4px 0; +} + +.exchange-group { margin-bottom: 4px; } + +.exchange-label { + padding: 5px 10px; + font-size: 11px; + font-weight: 700; + color: var(--text-muted); + text-transform: uppercase; + letter-spacing: 0.8px; + cursor: pointer; + user-select: none; + display: flex; + align-items: center; + gap: 4px; +} + +.exchange-label:hover { color: var(--text); } + +.exchange-label::before { + content: '▾'; + font-size: 10px; + transition: transform 0.15s; +} + +.exchange-label.collapsed::before { transform: rotate(-90deg); } + +.market-list { display: block; } +.market-list.hidden { display: none; } + +.market-item { + padding: 5px 10px 5px 18px; + cursor: pointer; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + font-size: 13px; + color: var(--text-muted); + border-radius: 4px; + margin: 0 4px; + transition: all 0.1s; +} + +.market-item:hover { background: var(--surface2); color: var(--text); } +.market-item.active { background: var(--accent-dim); color: #fff; } + +/* ── Detail main area ──────────────────────────────────────── */ +#detail-main { + flex: 1; + overflow-y: auto; + padding: 16px 20px; + display: flex; + flex-direction: column; +} + +.placeholder { + flex: 1; + display: flex; + align-items: center; + justify-content: center; + color: var(--text-muted); + font-size: 15px; +} + +#detail-content { flex: 1; display: flex; flex-direction: column; gap: 12px; } + +#detail-header { + display: flex; + align-items: baseline; + gap: 10px; + flex-wrap: wrap; +} + +#detail-title { font-size: 20px; 
font-weight: 700; } + +.badge { + background: var(--surface2); + border: 1px solid var(--border); + border-radius: 4px; + padding: 2px 8px; + font-size: 11px; + font-weight: 600; + color: var(--text-muted); +} + +.unit { font-size: 12px; color: var(--text-muted); } + +/* ── Chart controls ────────────────────────────────────────── */ +.chart-controls { + display: flex; + align-items: center; + gap: 16px; + flex-wrap: wrap; +} + +.control-group { + display: flex; + align-items: center; + gap: 6px; +} + +.control-group > label { + font-size: 11px; + font-weight: 600; + color: var(--text-muted); + text-transform: uppercase; + letter-spacing: 0.5px; + white-space: nowrap; +} + +.btn-group { display: flex; gap: 2px; } + +.range-btn, .rt-btn { + background: var(--surface2); + border: 1px solid var(--border); + color: var(--text-muted); + padding: 4px 10px; + border-radius: 4px; + font-size: 12px; + cursor: pointer; + transition: all 0.1s; +} + +.range-btn:hover, .rt-btn:hover { color: var(--text); border-color: var(--accent); } +.range-btn.active, .rt-btn.active { + background: var(--accent); + border-color: var(--accent); + color: #fff; + font-weight: 600; +} + +.checkbox-label { + display: flex; + align-items: center; + gap: 5px; + cursor: pointer; + font-size: 12px; + color: var(--text-muted); +} + +.checkbox-label:hover { color: var(--text); } + +select { + background: var(--surface2); + border: 1px solid var(--border); + color: var(--text); + padding: 4px 8px; + border-radius: 4px; + font-size: 12px; + cursor: pointer; + outline: none; +} + +select:focus { border-color: var(--accent); } + +/* ── Chart ─────────────────────────────────────────────────── */ +.chart-wrapper { + position: relative; + flex: 1; + min-height: 280px; + max-height: 420px; + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + padding: 12px; +} + +/* ── Stats bar ─────────────────────────────────────────────── */ +#stats-bar { + display: flex; + 
gap: 0; + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + overflow: hidden; +} + +.stat-item { + flex: 1; + padding: 10px 16px; + border-right: 1px solid var(--border); + display: flex; + flex-direction: column; + gap: 2px; +} + +.stat-item:last-child { border-right: none; } + +.stat-label { + font-size: 10px; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.5px; + color: var(--text-muted); +} + +.stat-value { + font-size: 16px; + font-weight: 700; + color: var(--text); +} + +.stat-value.positive { color: var(--green); } +.stat-value.negative { color: var(--red); } + +/* ── Screener view ─────────────────────────────────────────── */ +#view-screener { + display: flex; + flex-direction: column; + height: 100%; + padding: 16px 20px; + gap: 12px; +} + +.screener-toolbar { + display: flex; + align-items: center; + gap: 16px; + flex-wrap: wrap; +} + +.screener-toolbar h2 { font-size: 18px; font-weight: 700; } + +.screener-filters { + display: flex; + align-items: center; + gap: 8px; + flex-wrap: wrap; +} + +.btn-primary { + background: var(--accent); + border: none; + color: #fff; + padding: 5px 14px; + border-radius: var(--radius); + cursor: pointer; + font-size: 13px; + font-weight: 600; + transition: background 0.15s; +} + +.btn-primary:hover { background: var(--accent-dim); } + +.table-wrapper { + flex: 1; + overflow-y: auto; + border: 1px solid var(--border); + border-radius: var(--radius); +} + +table { + width: 100%; + border-collapse: collapse; + font-size: 13px; +} + +thead { + position: sticky; + top: 0; + background: var(--surface2); + z-index: 1; +} + +th { + padding: 10px 12px; + text-align: left; + font-size: 11px; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.5px; + color: var(--text-muted); + border-bottom: 1px solid var(--border); + cursor: pointer; + user-select: none; +} + +th:hover { color: var(--text); } +th.num { text-align: right; } + +td { + padding: 8px 
12px; + border-bottom: 1px solid var(--border); + color: var(--text); +} + +td.num { text-align: right; font-variant-numeric: tabular-nums; } + +tbody tr { cursor: pointer; transition: background 0.1s; } +tbody tr:hover { background: var(--surface2); } +tbody tr:last-child td { border-bottom: none; } + +.pctile-cell { font-weight: 700; } +.extreme-long { color: var(--green); } +.extreme-short { color: var(--red); } +.neutral { color: var(--text-muted); } + +.pctile-bar { + display: inline-block; + height: 6px; + border-radius: 3px; + background: var(--accent); + margin-right: 6px; + vertical-align: middle; +} + +/* ── Compare view ──────────────────────────────────────────── */ +#view-compare { + display: flex; + flex-direction: column; + height: 100%; + padding: 16px 20px; + gap: 12px; +} + +.compare-toolbar { + display: flex; + flex-direction: column; + gap: 10px; +} + +.compare-toolbar h2 { font-size: 18px; font-weight: 700; } + +.compare-controls { + display: flex; + align-items: center; + gap: 12px; + flex-wrap: wrap; +} + +.compare-search-wrap { position: relative; } + +#compareSearch { + padding: 6px 10px; + background: var(--surface2); + border: 1px solid var(--border); + border-radius: var(--radius); + color: var(--text); + font-size: 13px; + outline: none; + width: 220px; +} + +#compareSearch:focus { border-color: var(--accent); } + +.autocomplete-dropdown { + position: absolute; + top: calc(100% + 2px); + left: 0; + width: 280px; + background: var(--surface2); + border: 1px solid var(--border); + border-radius: var(--radius); + z-index: 100; + max-height: 200px; + overflow-y: auto; + box-shadow: 0 4px 16px rgba(0,0,0,0.4); +} + +.autocomplete-item { + padding: 7px 12px; + cursor: pointer; + font-size: 13px; + border-bottom: 1px solid var(--border); +} + +.autocomplete-item:last-child { border-bottom: none; } +.autocomplete-item:hover { background: var(--surface); color: var(--accent); } +.autocomplete-item-exchange { font-size: 10px; color: 
var(--text-muted); margin-left: 6px; } + +#compareTags { display: flex; flex-wrap: wrap; gap: 6px; } + +.compare-tag { + display: flex; + align-items: center; + gap: 6px; + padding: 4px 10px; + border-radius: 20px; + font-size: 12px; + font-weight: 600; + color: #fff; + opacity: 0.9; +} + +.compare-tag-remove { + background: none; + border: none; + color: #fff; + cursor: pointer; + font-size: 14px; + line-height: 1; + opacity: 0.7; + padding: 0; +} + +.compare-tag-remove:hover { opacity: 1; } + +#compareChartWrap { + flex: 1; + min-height: 300px; +} + +/* ── Scrollbars ────────────────────────────────────────────── */ +::-webkit-scrollbar { width: 6px; height: 6px; } +::-webkit-scrollbar-track { background: transparent; } +::-webkit-scrollbar-thumb { background: var(--border); border-radius: 3px; } +::-webkit-scrollbar-thumb:hover { background: var(--text-muted); } + +/* ── Loading state ─────────────────────────────────────────── */ +.loading { opacity: 0.5; pointer-events: none; } diff --git a/package.json b/package.json new file mode 100644 index 0000000..b6909cf --- /dev/null +++ b/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "@anthropic-ai/claude-code": "^2.1.50" + } +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..63bbcb4 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +requests>=2.28.0 +beautifulsoup4>=4.11.0 +fastapi>=0.110.0 +uvicorn[standard]>=0.29.0 +tqdm>=4.66.0 diff --git a/schema.sql b/schema.sql new file mode 100644 index 0000000..c7bdf1f --- /dev/null +++ b/schema.sql @@ -0,0 +1,183 @@ +PRAGMA journal_mode = WAL; +PRAGMA foreign_keys = ON; + +-- ---------------------------------------------------------------- +-- commodities: one row per unique market (stable reference table) +-- ---------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS commodities ( + id INTEGER PRIMARY KEY, + cftc_code TEXT NOT NULL UNIQUE, + name TEXT NOT NULL, + exchange TEXT NOT NULL, + 
exchange_abbr TEXT NOT NULL, + contract_unit TEXT, + created_at TEXT DEFAULT (datetime('now')) +); + +CREATE INDEX IF NOT EXISTS idx_commodities_name ON commodities(name); +CREATE INDEX IF NOT EXISTS idx_commodities_exchange ON commodities(exchange_abbr); + +-- ---------------------------------------------------------------- +-- reports: one row per (commodity x report_date) +-- ---------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS reports ( + id INTEGER PRIMARY KEY, + commodity_id INTEGER NOT NULL REFERENCES commodities(id), + report_date TEXT NOT NULL, + prev_report_date TEXT, + source_file TEXT, + imported_at TEXT DEFAULT (datetime('now')), + UNIQUE (commodity_id, report_date) +); + +CREATE INDEX IF NOT EXISTS idx_reports_date ON reports(report_date); +CREATE INDEX IF NOT EXISTS idx_reports_commodity ON reports(commodity_id); + +-- ---------------------------------------------------------------- +-- positions: core position data, one row per (report x row_type) +-- row_type: 'All', 'Old', 'Other' +-- ---------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS positions ( + id INTEGER PRIMARY KEY, + report_id INTEGER NOT NULL REFERENCES reports(id), + row_type TEXT NOT NULL CHECK (row_type IN ('All', 'Old', 'Other')), + + -- Open interest + open_interest INTEGER, + + -- Non-commercial + noncomm_long INTEGER, + noncomm_short INTEGER, + noncomm_spreading INTEGER, + + -- Commercial + comm_long INTEGER, + comm_short INTEGER, + + -- Total reportable + total_long INTEGER, + total_short INTEGER, + + -- Nonreportable (small traders) + nonrept_long INTEGER, + nonrept_short INTEGER, + + -- Week-over-week changes (stored on All rows only) + chg_open_interest INTEGER, + chg_noncomm_long INTEGER, + chg_noncomm_short INTEGER, + chg_noncomm_spreading INTEGER, + chg_comm_long INTEGER, + chg_comm_short INTEGER, + chg_total_long INTEGER, + chg_total_short INTEGER, + chg_nonrept_long INTEGER, + 
chg_nonrept_short INTEGER, + + -- Percent of open interest + pct_open_interest REAL, + pct_noncomm_long REAL, + pct_noncomm_short REAL, + pct_noncomm_spreading REAL, + pct_comm_long REAL, + pct_comm_short REAL, + pct_total_long REAL, + pct_total_short REAL, + pct_nonrept_long REAL, + pct_nonrept_short REAL, + + -- Number of traders + traders_total INTEGER, + traders_noncomm_long INTEGER, + traders_noncomm_short INTEGER, + traders_noncomm_spread INTEGER, + traders_comm_long INTEGER, + traders_comm_short INTEGER, + traders_total_long INTEGER, + traders_total_short INTEGER, + + UNIQUE (report_id, row_type) +); + +CREATE INDEX IF NOT EXISTS idx_positions_report ON positions(report_id); + +-- ---------------------------------------------------------------- +-- concentration: largest-trader data (separate -- less-queried) +-- row_type: 'All', 'Old', 'Other' +-- ---------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS concentration ( + id INTEGER PRIMARY KEY, + report_id INTEGER NOT NULL REFERENCES reports(id), + row_type TEXT NOT NULL CHECK (row_type IN ('All', 'Old', 'Other')), + + -- By Gross Position + conc_gross_long_4 REAL, + conc_gross_short_4 REAL, + conc_gross_long_8 REAL, + conc_gross_short_8 REAL, + + -- By Net Position + conc_net_long_4 REAL, + conc_net_short_4 REAL, + conc_net_long_8 REAL, + conc_net_short_8 REAL, + + UNIQUE (report_id, row_type) +); + +CREATE INDEX IF NOT EXISTS idx_concentration_report ON concentration(report_id); + +-- ---------------------------------------------------------------- +-- import_log: tracks which source files have been processed +-- ---------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS import_log ( + id INTEGER PRIMARY KEY, + source TEXT NOT NULL UNIQUE, + source_type TEXT NOT NULL, + rows_inserted INTEGER DEFAULT 0, + rows_skipped INTEGER DEFAULT 0, + started_at TEXT, + completed_at TEXT, + status TEXT DEFAULT 'pending', + error_message TEXT +); + 
+-- ---------------------------------------------------------------- +-- v_net_positions: convenience view for common analytical queries +-- ---------------------------------------------------------------- +CREATE VIEW IF NOT EXISTS v_net_positions AS +SELECT + c.cftc_code, + c.name AS commodity, + c.exchange_abbr AS exchange, + r.report_date, + r.prev_report_date, + p.row_type, + p.open_interest, + p.noncomm_long, + p.noncomm_short, + p.noncomm_spreading, + (p.noncomm_long - p.noncomm_short) AS noncomm_net, + p.comm_long, + p.comm_short, + (p.comm_long - p.comm_short) AS comm_net, + p.nonrept_long, + p.nonrept_short, + (p.nonrept_long - p.nonrept_short) AS nonrept_net, + p.chg_open_interest, + p.chg_noncomm_long, + p.chg_noncomm_short, + p.chg_comm_long, + p.chg_comm_short, + p.pct_noncomm_long, + p.pct_noncomm_short, + p.pct_comm_long, + p.pct_comm_short, + p.traders_total, + p.traders_noncomm_long, + p.traders_noncomm_short, + p.traders_comm_long, + p.traders_comm_short +FROM positions p +JOIN reports r ON r.id = p.report_id +JOIN commodities c ON c.id = r.commodity_id; diff --git a/scripts/cron_entrypoint.sh b/scripts/cron_entrypoint.sh new file mode 100755 index 0000000..e078a7f --- /dev/null +++ b/scripts/cron_entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -e + +echo "[cron] Initializing database..." +python3 -m app.ingestion.cli init-db + +echo "[cron] Importing local HTML files from /app/data ..." +python3 -m app.ingestion.cli import-local-html --data-dir /app/data + +echo "[cron] Running historical import (skips already-imported sources)..." +python3 -m app.ingestion.cli import-history + +echo "[cron] Database status:" +python3 -m app.ingestion.cli status + +echo "[cron] Installing crontab..." +cp /app/scripts/crontab /etc/cron.d/cot-cron +chmod 0644 /etc/cron.d/cot-cron +crontab /etc/cron.d/cot-cron + +echo "[cron] Starting crond..." 
+exec cron -f diff --git a/scripts/crontab b/scripts/crontab new file mode 100644 index 0000000..a0a0c06 --- /dev/null +++ b/scripts/crontab @@ -0,0 +1,2 @@ +# CFTC COT weekly download — every Friday at 20:45 UTC (≈ 3:45 PM ET) +45 20 * * 5 root cd /app && python3 -m app.ingestion.cli download-and-import >> /data/cron.log 2>&1