From 55506eff9a751a7cfe52a8da95ddc7b069615746 Mon Sep 17 00:00:00 2001 From: SepComet <202308010230@stu.csust.edu.cn> Date: Thu, 30 Apr 2026 00:09:30 +0800 Subject: [PATCH] introduce ccgs --- .claude/settings.local.json | 8 + .omc/skills/adopt/SKILL.md | 440 +++++++++ .omc/skills/architecture-decision/SKILL.md | 455 +++++++++ .omc/skills/architecture-review/SKILL.md | 639 ++++++++++++ .omc/skills/art-bible/SKILL.md | 248 +++++ .omc/skills/asset-audit/SKILL.md | 94 ++ .omc/skills/asset-spec/SKILL.md | 264 +++++ .omc/skills/balance-check/SKILL.md | 118 +++ .omc/skills/brainstorm/SKILL.md | 350 +++++++ .omc/skills/bug-report/SKILL.md | 163 ++++ .omc/skills/bug-triage/SKILL.md | 243 +++++ .omc/skills/changelog/SKILL.md | 177 ++++ .omc/skills/code-review/SKILL.md | 166 ++++ .omc/skills/consistency-check/SKILL.md | 275 ++++++ .omc/skills/content-audit/SKILL.md | 204 ++++ .omc/skills/create-architecture/SKILL.md | 402 ++++++++ .omc/skills/create-control-manifest/SKILL.md | 276 ++++++ .omc/skills/create-epics/SKILL.md | 225 +++++ .omc/skills/create-stories/SKILL.md | 313 ++++++ .omc/skills/day-one-patch/SKILL.md | 218 +++++ .omc/skills/design-review/SKILL.md | 257 +++++ .omc/skills/design-system/SKILL.md | 841 ++++++++++++++++ .omc/skills/dev-story/SKILL.md | 323 ++++++ .omc/skills/estimate/SKILL.md | 131 +++ .omc/skills/gate-check/SKILL.md | 508 ++++++++++ .omc/skills/help/SKILL.md | 228 +++++ .omc/skills/hotfix/SKILL.md | 154 +++ .omc/skills/launch-checklist/SKILL.md | 239 +++++ .omc/skills/localize/SKILL.md | 440 +++++++++ .omc/skills/map-systems/SKILL.md | 363 +++++++ .omc/skills/milestone-review/SKILL.md | 139 +++ .omc/skills/onboard/SKILL.md | 96 ++ .omc/skills/patch-notes/SKILL.md | 186 ++++ .omc/skills/perf-profile/SKILL.md | 125 +++ .omc/skills/playtest-report/SKILL.md | 146 +++ .omc/skills/project-stage-detect/SKILL.md | 195 ++++ .omc/skills/propagate-design-change/SKILL.md | 238 +++++ .omc/skills/prototype/SKILL.md | 157 +++ .omc/skills/qa-plan/SKILL.md | 259 
+++++ .omc/skills/quick-design/SKILL.md | 274 ++++++ .omc/skills/regression-suite/SKILL.md | 250 +++++ .omc/skills/release-checklist/SKILL.md | 181 ++++ .omc/skills/retrospective/SKILL.md | 210 ++++ .omc/skills/reverse-document/SKILL.md | 262 +++++ .omc/skills/review-all-gdds/SKILL.md | 628 ++++++++++++ .omc/skills/scope-check/SKILL.md | 128 +++ .omc/skills/security-audit/SKILL.md | 244 +++++ .omc/skills/setup-engine/SKILL.md | 715 ++++++++++++++ .omc/skills/skill-improve/SKILL.md | 144 +++ .omc/skills/skill-test/SKILL.md | 356 +++++++ .omc/skills/smoke-check/SKILL.md | 417 ++++++++ .omc/skills/soak-test/SKILL.md | 283 ++++++ .omc/skills/sprint-plan/SKILL.md | 228 +++++ .omc/skills/sprint-status/SKILL.md | 208 ++++ .omc/skills/start/SKILL.md | 225 +++++ .omc/skills/story-done/SKILL.md | 428 ++++++++ .omc/skills/story-readiness/SKILL.md | 348 +++++++ .omc/skills/team-audio/SKILL.md | 129 +++ .omc/skills/team-combat/SKILL.md | 120 +++ .omc/skills/team-level/SKILL.md | 175 ++++ .omc/skills/team-live-ops/SKILL.md | 145 +++ .omc/skills/team-narrative/SKILL.md | 111 +++ .omc/skills/team-polish/SKILL.md | 124 +++ .omc/skills/team-qa/SKILL.md | 222 +++++ .omc/skills/team-release/SKILL.md | 148 +++ .omc/skills/team-ui/SKILL.md | 170 ++++ .omc/skills/tech-debt/SKILL.md | 121 +++ .omc/skills/test-evidence-review/SKILL.md | 250 +++++ .omc/skills/test-flakiness/SKILL.md | 210 ++++ .omc/skills/test-helpers/SKILL.md | 394 ++++++++ .omc/skills/test-setup/SKILL.md | 425 ++++++++ .omc/skills/ux-design/SKILL.md | 975 +++++++++++++++++++ .omc/skills/ux-review/SKILL.md | 262 +++++ .omc/state/hud-stdin-cache.json | 1 + Assets/Launcher.unity | 4 + CLAUDE.md | 180 ++++ design/gdd/event-system.md | 338 +++++++ design/gdd/gdd-cross-review-2026-04-29-v2.md | 207 ++++ design/gdd/gdd-cross-review-2026-04-29.md | 189 ++++ design/gdd/node-system.md | 420 ++++++++ design/gdd/progression.md | 421 ++++++++ design/gdd/reviews/node-system-review-log.md | 47 + design/gdd/shop.md | 509 ++++++++++ 
design/gdd/systems-index.md | 37 + design/gdd/tower-assembly.md | 367 +++++++ design/registry/entities.yaml | 155 +++ production/session-state/active.md | 16 + 87 files changed, 22704 insertions(+) create mode 100644 .claude/settings.local.json create mode 100644 .omc/skills/adopt/SKILL.md create mode 100644 .omc/skills/architecture-decision/SKILL.md create mode 100644 .omc/skills/architecture-review/SKILL.md create mode 100644 .omc/skills/art-bible/SKILL.md create mode 100644 .omc/skills/asset-audit/SKILL.md create mode 100644 .omc/skills/asset-spec/SKILL.md create mode 100644 .omc/skills/balance-check/SKILL.md create mode 100644 .omc/skills/brainstorm/SKILL.md create mode 100644 .omc/skills/bug-report/SKILL.md create mode 100644 .omc/skills/bug-triage/SKILL.md create mode 100644 .omc/skills/changelog/SKILL.md create mode 100644 .omc/skills/code-review/SKILL.md create mode 100644 .omc/skills/consistency-check/SKILL.md create mode 100644 .omc/skills/content-audit/SKILL.md create mode 100644 .omc/skills/create-architecture/SKILL.md create mode 100644 .omc/skills/create-control-manifest/SKILL.md create mode 100644 .omc/skills/create-epics/SKILL.md create mode 100644 .omc/skills/create-stories/SKILL.md create mode 100644 .omc/skills/day-one-patch/SKILL.md create mode 100644 .omc/skills/design-review/SKILL.md create mode 100644 .omc/skills/design-system/SKILL.md create mode 100644 .omc/skills/dev-story/SKILL.md create mode 100644 .omc/skills/estimate/SKILL.md create mode 100644 .omc/skills/gate-check/SKILL.md create mode 100644 .omc/skills/help/SKILL.md create mode 100644 .omc/skills/hotfix/SKILL.md create mode 100644 .omc/skills/launch-checklist/SKILL.md create mode 100644 .omc/skills/localize/SKILL.md create mode 100644 .omc/skills/map-systems/SKILL.md create mode 100644 .omc/skills/milestone-review/SKILL.md create mode 100644 .omc/skills/onboard/SKILL.md create mode 100644 .omc/skills/patch-notes/SKILL.md create mode 100644 .omc/skills/perf-profile/SKILL.md create 
mode 100644 .omc/skills/playtest-report/SKILL.md create mode 100644 .omc/skills/project-stage-detect/SKILL.md create mode 100644 .omc/skills/propagate-design-change/SKILL.md create mode 100644 .omc/skills/prototype/SKILL.md create mode 100644 .omc/skills/qa-plan/SKILL.md create mode 100644 .omc/skills/quick-design/SKILL.md create mode 100644 .omc/skills/regression-suite/SKILL.md create mode 100644 .omc/skills/release-checklist/SKILL.md create mode 100644 .omc/skills/retrospective/SKILL.md create mode 100644 .omc/skills/reverse-document/SKILL.md create mode 100644 .omc/skills/review-all-gdds/SKILL.md create mode 100644 .omc/skills/scope-check/SKILL.md create mode 100644 .omc/skills/security-audit/SKILL.md create mode 100644 .omc/skills/setup-engine/SKILL.md create mode 100644 .omc/skills/skill-improve/SKILL.md create mode 100644 .omc/skills/skill-test/SKILL.md create mode 100644 .omc/skills/smoke-check/SKILL.md create mode 100644 .omc/skills/soak-test/SKILL.md create mode 100644 .omc/skills/sprint-plan/SKILL.md create mode 100644 .omc/skills/sprint-status/SKILL.md create mode 100644 .omc/skills/start/SKILL.md create mode 100644 .omc/skills/story-done/SKILL.md create mode 100644 .omc/skills/story-readiness/SKILL.md create mode 100644 .omc/skills/team-audio/SKILL.md create mode 100644 .omc/skills/team-combat/SKILL.md create mode 100644 .omc/skills/team-level/SKILL.md create mode 100644 .omc/skills/team-live-ops/SKILL.md create mode 100644 .omc/skills/team-narrative/SKILL.md create mode 100644 .omc/skills/team-polish/SKILL.md create mode 100644 .omc/skills/team-qa/SKILL.md create mode 100644 .omc/skills/team-release/SKILL.md create mode 100644 .omc/skills/team-ui/SKILL.md create mode 100644 .omc/skills/tech-debt/SKILL.md create mode 100644 .omc/skills/test-evidence-review/SKILL.md create mode 100644 .omc/skills/test-flakiness/SKILL.md create mode 100644 .omc/skills/test-helpers/SKILL.md create mode 100644 .omc/skills/test-setup/SKILL.md create mode 100644 
.omc/skills/ux-design/SKILL.md create mode 100644 .omc/skills/ux-review/SKILL.md create mode 100644 .omc/state/hud-stdin-cache.json create mode 100644 CLAUDE.md create mode 100644 design/gdd/event-system.md create mode 100644 design/gdd/gdd-cross-review-2026-04-29-v2.md create mode 100644 design/gdd/gdd-cross-review-2026-04-29.md create mode 100644 design/gdd/node-system.md create mode 100644 design/gdd/progression.md create mode 100644 design/gdd/reviews/node-system-review-log.md create mode 100644 design/gdd/shop.md create mode 100644 design/gdd/systems-index.md create mode 100644 design/gdd/tower-assembly.md create mode 100644 design/registry/entities.yaml create mode 100644 production/session-state/active.md diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 0000000..24a258f --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,8 @@ +{ + "permissions": { + "allow": [ + "Skill(consistency-check)", + "Bash(xargs grep *)" + ] + } +} diff --git a/.omc/skills/adopt/SKILL.md b/.omc/skills/adopt/SKILL.md new file mode 100644 index 0000000..dca3fe0 --- /dev/null +++ b/.omc/skills/adopt/SKILL.md @@ -0,0 +1,440 @@ +--- +name: adopt +description: "Brownfield onboarding — audits existing project artifacts for template format compliance (not just existence), classifies gaps by impact, and produces a numbered migration plan. Run this when joining an in-progress project or upgrading from an older template version. Distinct from /project-stage-detect (which checks what exists) — this checks whether what exists will actually work with the template's skills." 
+argument-hint: "[focus: full | gdds | adrs | stories | infra]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, AskUserQuestion +agent: technical-director +--- + +# Adopt — Brownfield Template Adoption + +This skill audits an existing project's artifacts for **format compliance** with +the template's skill pipeline, then produces a prioritised migration plan. + +**This is not `/project-stage-detect`.** +`/project-stage-detect` answers: *what exists?* +`/adopt` answers: *will what exists actually work with the template's skills?* + +A project can have GDDs, ADRs, and stories — and every format-sensitive skill +will still fail silently or produce wrong results if those artifacts are in the +wrong internal format. + +**Output:** `docs/adoption-plan-[date].md` — a persistent, checkable migration plan. + +**Argument modes:** + +**Audit mode:** `$ARGUMENTS[0]` (blank = `full`) + +- **No argument / `full`**: Complete audit — all artifact types +- **`gdds`**: GDD format compliance only +- **`adrs`**: ADR format compliance only +- **`stories`**: Story format compliance only +- **`infra`**: Infrastructure artifact gaps only (registry, manifest, sprint-status, stage.txt) + +--- + +## Phase 1: Detect Project State + +Emit one line before reading: `"Scanning project artifacts..."` — this confirms the +skill is running during the silent read phase. + +Then read silently before presenting anything else. + +### Existence check +- `production/stage.txt` — if present, read it (authoritative phase) +- `design/gdd/game-concept.md` — concept exists? +- `design/gdd/systems-index.md` — systems index exists? +- Count GDD files: `design/gdd/*.md` (excluding game-concept.md and systems-index.md) +- Count ADR files: `docs/architecture/adr-*.md` +- Count story files: `production/epics/**/*.md` (excluding EPIC.md) +- `.claude/docs/technical-preferences.md` — engine configured? +- `docs/engine-reference/` — engine reference docs present? 
+- Glob `docs/adoption-plan-*.md` — note the filename of the most recent prior plan if any exist + +### Infer phase (if no stage.txt) +Use the same heuristic as `/project-stage-detect`: +- 10+ source files in `src/` → Production +- Stories in `production/epics/` → Pre-Production +- ADRs exist → Technical Setup +- systems-index.md exists → Systems Design +- game-concept.md exists → Concept +- Nothing → Fresh (not a brownfield project — suggest `/start`) + +If the project appears fresh (no artifacts at all), use `AskUserQuestion`: +- "This looks like a fresh project — no existing artifacts found. `/adopt` is for + projects with work to migrate. What would you like to do?" + - "Run `/start` — begin guided first-time onboarding" + - "My artifacts are in a non-standard location — help me find them" + - "Cancel" + +Then stop — do not proceed with the audit regardless of which option the user picks +(each option leads to a different skill or manual investigation). + +Report: "Detected phase: [phase]. Found: [N] GDDs, [M] ADRs, [P] stories." + +--- + +## Phase 2: Format Audit + +For each artifact type in scope (based on argument mode), check not just that +the file exists but that it contains the internal structure the template requires. 
+ +### 2a: GDD Format Audit + +For each GDD file found, check for the 8 required sections by scanning headings: + +| Required Section | Heading pattern to look for | +|---|---| +| Overview | `## Overview` | +| Player Fantasy | `## Player Fantasy` | +| Detailed Rules / Design | `## Detailed` or `## Core Rules` or `## Detailed Design` | +| Formulas | `## Formulas` or `## Formula` | +| Edge Cases | `## Edge Cases` | +| Dependencies | `## Dependencies` or `## Depends` | +| Tuning Knobs | `## Tuning` | +| Acceptance Criteria | `## Acceptance` | + +For each GDD, record: +- Which sections are present +- Which sections are missing +- Whether it has any content in present sections or just placeholder text + (`[To be designed]` or equivalent) + +Also check: does each GDD have a `**Status**:` field in its header block? +Valid values: `In Design`, `Designed`, `In Review`, `Approved`, `Needs Revision`. + +### 2b: ADR Format Audit + +For each ADR file found, check for these critical sections: + +| Section | Impact if missing | +|---|---| +| `## Status` | **BLOCKING** — `/story-readiness` ADR status check silently passes everything | +| `## ADR Dependencies` | HIGH — dependency ordering in `/architecture-review` breaks | +| `## Engine Compatibility` | HIGH — post-cutoff API risk is unknown | +| `## GDD Requirements Addressed` | MEDIUM — traceability matrix loses coverage | +| `## Performance Implications` | LOW — not pipeline-critical | + +For each ADR, record: which sections present, which missing, current Status value +if the Status section exists. + +### 2c: systems-index.md Format Audit + +If `design/gdd/systems-index.md` exists: + +1. **Parenthetical status values** — Grep for any Status cell containing + parentheses: `"Needs Revision ("`, `"In Progress ("`, etc. + These break exact-string matching in `/gate-check`, `/create-stories`, + and `/architecture-review`. **BLOCKING.** + +2. 
**Valid status values** — check that Status column values are only from: + `Not Started`, `In Progress`, `In Review`, `Designed`, `Approved`, `Needs Revision` + Flag any unrecognised values. + +3. **Column structure** — check that the table has at minimum: System name, + Layer, Priority, Status columns. Missing columns degrade skill functionality. + +### 2d: Story Format Audit + +For each story file found: + +- **`Manifest Version:` field** — present in story header? (LOW — auto-passes if absent) +- **TR-ID reference** — does story contain `TR-[a-z]+-[0-9]+` pattern? (MEDIUM — no staleness tracking) +- **ADR reference** — does story reference at least one ADR? (check for `ADR-` pattern) +- **Status field** — present and readable? +- **Acceptance criteria** — does the story have a checkbox list (`- [ ]`)? + +### 2e: Infrastructure Audit + +| Artifact | Path | Impact if missing | +|---|---|---| +| TR registry | `docs/architecture/tr-registry.yaml` | HIGH — no stable requirement IDs | +| Control manifest | `docs/architecture/control-manifest.md` | HIGH — no layer rules for stories | +| Manifest version stamp | In manifest header: `Manifest Version:` | MEDIUM — staleness checks blind | +| Sprint status | `production/sprint-status.yaml` | MEDIUM — `/sprint-status` falls back to markdown | +| Stage file | `production/stage.txt` | MEDIUM — phase auto-detect unreliable | +| Engine reference | `docs/engine-reference/[engine]/VERSION.md` | HIGH — ADR engine checks blind | +| Architecture traceability | `docs/architecture/architecture-traceability.md` | MEDIUM — no persistent matrix | + +### 2f: Technical Preferences Audit + +Read `.claude/docs/technical-preferences.md`. 
Check each field for `[TO BE CONFIGURED]`: +- Engine, Language, Rendering, Physics → HIGH if unconfigured (ADR skills fail) +- Naming conventions → MEDIUM +- Performance budgets → MEDIUM +- Forbidden Patterns, Allowed Libraries → LOW (starts empty by design) + +--- + +## Phase 3: Classify and Prioritise Gaps + +Organise every gap found across all audits into four severity tiers: + +**BLOCKING** — Will cause template skills to silently produce wrong results *right now*. +Examples: ADR missing Status field, systems-index parenthetical status values, +engine not configured when ADRs exist. + +**HIGH** — Will cause stories to be generated with missing safety checks, or +infrastructure bootstrapping will fail. +Examples: ADRs missing Engine Compatibility, GDDs missing Acceptance Criteria +(stories can't be generated from them), tr-registry.yaml missing. + +**MEDIUM** — Degrades quality and pipeline tracking but does not break functionality. +Examples: GDDs missing Tuning Knobs or Formulas sections, stories missing TR-IDs, +sprint-status.yaml missing. + +**LOW** — Retroactive improvements that are nice-to-have but not urgent. +Examples: Stories missing Manifest Version stamps, GDDs missing Open Questions section. + +Count totals per tier. If zero BLOCKING and zero HIGH gaps: report that the project +is template-compatible and only advisory improvements remain. + +--- + +## Phase 4: Build the Migration Plan + +Compose a numbered, ordered action plan. Ordering rules: +1. BLOCKING gaps first (must fix before any pipeline skill runs reliably) +2. HIGH gaps next, infrastructure before GDD/ADR content (bootstrapping needs correct formats) +3. MEDIUM gaps ordered: GDD gaps before ADR gaps before story gaps (stories depend on GDDs and ADRs) +4. 
LOW gaps last + +For each gap, produce a plan entry with: +- A clear problem statement (one sentence, no jargon) +- The exact command to fix it, if a skill handles it +- Manual steps if it requires direct editing +- A time estimate (rough: 5 min / 30 min / 1 session) +- A checkbox `- [ ]` for tracking + +**Special case — systems-index parenthetical status values:** +This is always the first item if present. Show the exact values that need changing +and the exact replacement text. Offer to fix this immediately before writing the plan. + +**Special case — ADRs missing Status field:** +For each affected ADR, the fix is: +`/architecture-decision retrofit docs/architecture/adr-[NNNN]-[slug].md` +List each ADR as a separate checkable item. + +**Special case — GDDs missing sections:** +For each affected GDD, list which sections are missing and the fix: +`/design-system retrofit design/gdd/[filename].md` + +**Infrastructure bootstrap ordering** — always present in this sequence: +1. Fix ADR formats first (registry depends on reading ADR Status fields) +2. Run `/architecture-review` → bootstraps `tr-registry.yaml` +3. Run `/create-control-manifest` → creates manifest with version stamp +4. Run `/sprint-plan update` → creates `sprint-status.yaml` +5. Run `/gate-check [phase]` → writes `stage.txt` authoritatively + +**Existing stories** — note explicitly: +> "Existing stories continue to work with all template skills — all new format +> checks auto-pass when the fields are absent. They won't benefit from TR-ID +> staleness tracking or manifest version checks until they're regenerated. This +> is intentional: do not regenerate stories that are already in progress." 
+ +--- + +## Phase 5: Present Summary and Ask to Write + +Present a compact summary before writing: + +``` +## Adoption Audit Summary +Phase detected: [phase] +Engine: [configured / NOT CONFIGURED] +GDDs audited: [N] ([X] fully compliant, [Y] with gaps) +ADRs audited: [N] ([X] fully compliant, [Y] with gaps) +Stories audited: [N] + +Gap counts: + BLOCKING: [N] — template skills will malfunction without these fixes + HIGH: [N] — unsafe to run /create-stories or /story-readiness + MEDIUM: [N] — quality degradation + LOW: [N] — optional improvements + +Estimated remediation: [X blocking items × ~Y min each = roughly Z hours] +``` + +Before asking to write, show a **Gap Preview**: +- List every BLOCKING gap as a one-line bullet describing the actual problem + (e.g. `systems-index.md: 3 rows have parenthetical status values`, + `adr-0002.md: missing ## Status section`). No counts — show the actual items. +- Show HIGH / MEDIUM / LOW as counts only (e.g. `HIGH: 4, MEDIUM: 2, LOW: 1`). + +This gives the user enough context to judge scope before committing to writing the file. + +If a prior adoption plan was detected in Phase 1, add a note: +> "A previous plan exists at `docs/adoption-plan-[prior-date].md`. The new plan will +> reflect current project state — it does not diff against the prior run." + +Use `AskUserQuestion`: +- "Ready to write the migration plan?" + - "Yes — write `docs/adoption-plan-[date].md`" + - "Show me the full plan preview first (don't write yet)" + - "Cancel — I'll handle migration manually" + +If the user picks "Show me the full plan preview", output the complete plan as a +fenced markdown block. Then ask again with the same three options. 
+ +--- + +## Phase 6: Write the Adoption Plan + +If approved, write `docs/adoption-plan-[date].md` with this structure: + +```markdown +# Adoption Plan + +> **Generated**: [date] +> **Project phase**: [phase] +> **Engine**: [name + version, or "Not configured"] +> **Template version**: v1.0+ + +Work through these steps in order. Check off each item as you complete it. +Re-run `/adopt` anytime to check remaining gaps. + +--- + +## Step 1: Fix Blocking Gaps + +[One sub-section per blocking gap with problem, fix command, time estimate, checkbox] + +--- + +## Step 2: Fix High-Priority Gaps + +[One sub-section per high gap] + +--- + +## Step 3: Bootstrap Infrastructure + +### 3a. Register existing requirements (creates tr-registry.yaml) +Run `/architecture-review` — even if ADRs already exist, this run bootstraps +the TR registry from your existing GDDs and ADRs. +**Time**: 1 session (review can be long for large codebases) +- [ ] tr-registry.yaml created + +### 3b. Create control manifest +Run `/create-control-manifest` +**Time**: 30 min +- [ ] docs/architecture/control-manifest.md created + +### 3c. Create sprint tracking file +Run `/sprint-plan update` +**Time**: 5 min (if sprint plan already exists as markdown) +- [ ] production/sprint-status.yaml created + +### 3d. Set authoritative project stage +Run `/gate-check [current-phase]` +**Time**: 5 min +- [ ] production/stage.txt written + +--- + +## Step 4: Medium-Priority Gaps + +[One sub-section per medium gap] + +--- + +## Step 5: Optional Improvements + +[One sub-section per low gap] + +--- + +## What to Expect from Existing Stories + +Existing stories continue to work with all template skills. New format checks +(TR-ID validation, manifest version staleness) auto-pass when the fields are +absent — so nothing breaks. They won't benefit from staleness tracking until +regenerated. Do not regenerate stories that are in progress or done. 
+ +--- + +## Re-run + +Run `/adopt` again after completing Step 3 to verify all blocking and high gaps +are resolved. The new run will reflect the current state of the project. +``` + +--- + +## Phase 6b: Set Review Mode + +After writing the adoption plan (or if the user cancels writing), check whether +`production/review-mode.txt` exists. + +**If it exists**: Read it and note the current mode — "Review mode is already set to `[current]`." — skip the prompt. + +**If it does not exist**: Use `AskUserQuestion`: + +- **Prompt**: "One more setup step: how much design review would you like as you work through the workflow?" +- **Options**: + - `Full` — Director specialists review at each key workflow step. Best for teams, learning the workflow, or when you want thorough feedback on every decision. + - `Lean (recommended)` — Directors only at phase gate transitions (/gate-check). Skips per-skill reviews. Balanced for solo devs and small teams. + - `Solo` — No director reviews at all. Maximum speed. Best for game jams, prototypes, or if reviews feel like overhead. + +Write the choice to `production/review-mode.txt` immediately after selection — no separate "May I write?" needed: +- `Full` → write `full` +- `Lean (recommended)` → write `lean` +- `Solo` → write `solo` + +Create the `production/` directory if it does not exist. + +--- + +## Phase 7: Offer First Action + +After writing the plan, don't stop there. Pick the single highest-priority gap +and offer to handle it immediately using `AskUserQuestion`. Choose the first +branch that applies: + +**If there are parenthetical status values in systems-index.md:** +Use `AskUserQuestion`: +- "The most urgent fix is `systems-index.md` — [N] rows have parenthetical status + values (e.g. `Needs Revision (see notes)`) that break /gate-check, + /create-stories, and /architecture-review right now. I can fix these in-place." 
+ - "Fix it now — edit systems-index.md" + - "I'll fix it myself" + - "Done — leave me with the plan" + +**If ADRs are missing `## Status` (and no parenthetical issue):** +Use `AskUserQuestion`: +- "The most urgent fix is adding `## Status` to [N] ADR(s): [list filenames]. + Without it, /story-readiness silently passes all ADR checks. Start with + [first affected filename]?" + - "Yes — retrofit [first affected filename] now" + - "Retrofit all [N] ADRs one by one" + - "I'll handle ADRs myself" + +**If GDDs are missing Acceptance Criteria (and no blocking issues above):** +Use `AskUserQuestion`: +- "The most urgent gap is missing Acceptance Criteria in [N] GDD(s): + [list filenames]. Without them, /create-stories can't generate stories. + Start with [highest-priority GDD filename]?" + - "Yes — add Acceptance Criteria to [GDD filename] now" + - "Do all [N] GDDs one by one" + - "I'll handle GDDs myself" + +**If no BLOCKING or HIGH gaps exist:** +Use `AskUserQuestion`: +- "No blocking gaps — this project is template-compatible. What next?" + - "Walk me through the medium-priority improvements" + - "Run /project-stage-detect for a broader health check" + - "Done — I'll work through the plan at my own pace" + +--- + +## Collaborative Protocol + +1. **Read silently** — complete the full audit before presenting anything +2. **Show the summary first** — let the user see scope before asking to write +3. **Ask before writing** — always confirm before creating the adoption plan file +4. **Offer, don't force** — the plan is advisory; the user decides what to fix and when +5. **One action at a time** — after handing off the plan, offer one specific next step, + not a list of six things to do simultaneously +6. 
**Never regenerate existing artifacts** — only fill gaps in what exists; + do not rewrite GDDs, ADRs, or stories that already have content diff --git a/.omc/skills/architecture-decision/SKILL.md b/.omc/skills/architecture-decision/SKILL.md new file mode 100644 index 0000000..92b6f59 --- /dev/null +++ b/.omc/skills/architecture-decision/SKILL.md @@ -0,0 +1,455 @@ +--- +name: architecture-decision +description: "Creates an Architecture Decision Record (ADR) documenting a significant technical decision, its context, alternatives considered, and consequences. Every major technical choice should have an ADR." +argument-hint: "[title] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion +--- + +When this skill is invoked: + +## 0. Parse Arguments — Detect Retrofit Mode + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +**If the argument starts with `retrofit` followed by a file path** +(e.g., `/architecture-decision retrofit docs/architecture/adr-0001-event-system.md`): + +Enter **retrofit mode**: + +1. Read the existing ADR file completely. +2. Identify which template sections are present by scanning headings: + - `## Status` — **BLOCKING if missing**: `/story-readiness` cannot check ADR acceptance + - `## ADR Dependencies` — HIGH if missing: dependency ordering breaks + - `## Engine Compatibility` — HIGH if missing: post-cutoff risk unknown + - `## GDD Requirements Addressed` — MEDIUM if missing: traceability lost +3. 
Present to the user: + ``` + ## Retrofit: [ADR title] + File: [path] + + Sections already present (will not be touched): + ✓ Status: [current value, or "MISSING — will add"] + ✓ [section] + + Missing sections to add: + ✗ Status — BLOCKING (stories cannot validate ADR acceptance without this) + ✗ ADR Dependencies — HIGH + ✗ Engine Compatibility — HIGH + ``` +4. Ask: "Shall I add the [N] missing sections? I will not modify any existing content." +5. If yes: + - For **Status**: ask the user — "What is the current status of this decision?" + Options: "Proposed", "Accepted", "Deprecated", "Superseded by ADR-XXXX" + - For **ADR Dependencies**: ask — "Does this decision depend on any other ADR? + Does it enable or block any other ADR or epic?" Accept "None" for each field. + - For **Engine Compatibility**: read the engine reference docs (same as Step 0 below) + and ask the user to confirm the domain. Then generate the table with verified data. + - For **GDD Requirements Addressed**: ask — "Which GDD systems motivated this decision? + What specific requirement in each GDD does this ADR address?" + - Append each missing section to the ADR file using the Edit tool. + - **Never modify any existing section.** Only append or fill absent sections. +6. After adding all missing sections, update the ADR's `## Date` field if it is absent. +7. Suggest: "Run `/architecture-review` to re-validate coverage now that this ADR + has its Status and Dependencies fields." + +If NOT in retrofit mode, proceed to Step 0 below (normal ADR authoring). + +**No-argument guard**: If no argument was provided (title is empty), ask before +running Phase 0: + +> "What technical decision are you documenting? Please provide a short title +> (e.g., `event-system-architecture`, `physics-engine-choice`)." + +Use the user's response as the title, then proceed to Step 0. + +--- + +## 0. Load Engine Context (ALWAYS FIRST) + +Before doing anything else, establish the engine environment: + +1. 
Read `docs/engine-reference/[engine]/VERSION.md` to get: + - Engine name and version + - LLM knowledge cutoff date + - Post-cutoff version risk levels (LOW / MEDIUM / HIGH) + +2. Identify the **domain** of this architecture decision from the title or + user description. Common domains: Physics, Rendering, UI, Audio, Navigation, + Animation, Networking, Core, Input, Scripting. + +3. Read the corresponding module reference if it exists: + `docs/engine-reference/[engine]/modules/[domain].md` + +4. Read `docs/engine-reference/[engine]/breaking-changes.md` — flag any + changes in the relevant domain that post-date the LLM's training cutoff. + +5. Read `docs/engine-reference/[engine]/deprecated-apis.md` — flag any APIs + in the relevant domain that should not be used. + +6. **Display a knowledge gap warning** before proceeding if the domain carries + MEDIUM or HIGH risk: + + ``` + ⚠️ ENGINE KNOWLEDGE GAP WARNING + Engine: [name + version] + Domain: [domain] + Risk Level: HIGH — This version is post-LLM-cutoff. + + Key changes verified from engine-reference docs: + - [Change 1 relevant to this domain] + - [Change 2] + + This ADR will be cross-referenced against the engine reference library. + Proceed with verified information only — do NOT rely solely on training data. + ``` + + If no engine has been configured yet, prompt: "No engine is configured. + Run `/setup-engine` first, or tell me which engine you are using." + +--- + +## 1. Determine the next ADR number + +Scan `docs/architecture/` for existing ADRs to find the next number. + +--- + +## 2. Gather context + +Read related code, existing ADRs, and relevant GDDs from `design/gdd/`. + +### 2a: Architecture Registry Check (BLOCKING gate) + +Read `docs/registry/architecture.yaml`. Extract entries relevant to this ADR's +domain and decision (grep by system name, domain keyword, or state being touched). 
+ +Present any relevant stances to the user **before** the collaborative design +begins, as locked constraints: + +``` +## Existing Architectural Stances (must not contradict) + +State Ownership: + player_health → owned by health-system (ADR-0001) + Interface: HealthComponent.current_health (read-only float) + → If this ADR reads or writes player health, it must use this interface. + +Interface Contracts: + damage_delivery → signal pattern (ADR-0003) + Signal: damage_dealt(amount, target, is_crit) + → If this ADR delivers or receives damage events, it must use this signal. + +Forbidden Patterns: + ✗ autoload_singleton_coupling (ADR-0001) + ✗ direct_cross_system_state_write (ADR-0000) + → The proposed approach must not use these patterns. +``` + +If the user's proposed decision would contradict any registered stance, surface +the conflict immediately: + +> "⚠️ Conflict: This ADR proposes [X], but ADR-[NNNN] established that [Y] is +> the accepted pattern for this purpose. Proceeding without resolving this will +> produce contradictory ADRs and inconsistent stories. +> Options: (1) Align with the existing stance, (2) Supersede ADR-[NNNN] with +> an explicit replacement, (3) Explain why this case is an exception." + +Do not proceed to Step 3 (collaborative design) until any conflict is resolved +or explicitly accepted as an intentional exception. + +--- + +## 3. Guide the decision collaboratively + +Before asking anything, derive the skill's best guesses from the context already +gathered (GDDs read, engine reference loaded, existing ADRs scanned). Then present +a **confirm/adjust** prompt using `AskUserQuestion` — not open-ended questions. 
+ +**Derive assumptions first:** +- **Problem**: Infer from the title + GDD context what decision needs to be made +- **Alternatives**: Propose 2-3 concrete options from engine reference + GDD requirements +- **Dependencies**: Scan existing ADRs for upstream dependencies; assume None if unclear +- **GDD linkage**: Extract which GDD systems the title directly relates to +- **Status**: Always `Proposed` for new ADRs — never ask the user what the status is + +**Scope of assumptions tab**: Assumptions cover only: problem framing, alternative approaches, upstream dependencies, GDD linkage, and status. Schema design questions (e.g., "How should spawn timing work?", "Should data be inline or external?") are NOT assumptions — they are design decisions belonging to a separate step after the assumptions are confirmed. Do not include schema design questions in the assumptions AskUserQuestion widget. + +**After assumptions are confirmed**, if the ADR involves schema or data design choices, use a separate multi-tab `AskUserQuestion` to ask each design question independently before drafting. + +**Present assumptions with `AskUserQuestion`:** + +``` +Here's what I'm assuming before drafting: + +Problem: [one-sentence problem statement derived from context] +Alternatives I'll consider: + A) [option derived from engine reference] + B) [option derived from GDD requirements] + C) [option from common patterns] +GDD systems driving this: [list derived from context] +Dependencies: [upstream ADRs if any, otherwise "None"] +Status: Proposed + +[A] Proceed — draft with these assumptions +[B] Change the alternatives list +[C] Adjust the GDD linkage +[D] Add a performance budget constraint +[E] Something else needs changing first +``` + +Do not generate the ADR until the user confirms assumptions or provides corrections. 
+ +**After engine specialist and TD reviews return** (Step 4.5/4.6), if unresolved +decisions remain, present each one as a separate `AskUserQuestion` with the proposed +options as choices plus a free-text escape: + +``` +Decision: [specific unresolved point] +[A] [option from specialist review] +[B] [alternative option] +[C] Different approach — I'll describe it +``` + +**ADR Dependencies** — derive from existing ADRs, then confirm: +- Does this decision depend on any other ADR not yet Accepted? +- Does it unlock or unblock any other ADR or epic? +- Does it block any specific epic from starting? + +Record answers in the **ADR Dependencies** section. Write "None" for each field if no constraints apply. + +--- + +## 4. Generate the ADR + +Following this format: + +```markdown +# ADR-[NNNN]: [Title] + +## Status +[Proposed | Accepted | Deprecated | Superseded by ADR-XXXX] + +## Date +[Date of decision] + +## Engine Compatibility + +| Field | Value | +|-------|-------| +| **Engine** | [e.g. Godot 4.6] | +| **Domain** | [Physics / Rendering / UI / Audio / Navigation / Animation / Networking / Core / Input] | +| **Knowledge Risk** | [LOW / MEDIUM / HIGH — from VERSION.md] | +| **References Consulted** | [List engine-reference docs read, e.g. `docs/engine-reference/godot/modules/physics.md`] | +| **Post-Cutoff APIs Used** | [Any APIs from post-LLM-cutoff versions this decision depends on, or "None"] | +| **Verification Required** | [Specific behaviours to test before shipping, or "None"] | + +## ADR Dependencies + +| Field | Value | +|-------|-------| +| **Depends On** | [ADR-NNNN (must be Accepted before this can be implemented), or "None"] | +| **Enables** | [ADR-NNNN (this ADR unlocks that decision), or "None"] | +| **Blocks** | [Epic/Story name — cannot start until this ADR is Accepted, or "None"] | +| **Ordering Note** | [Any sequencing constraint that isn't captured above] | + +## Context + +### Problem Statement +[What problem are we solving? 
Why does this decision need to be made now?] + +### Constraints +- [Technical constraints] +- [Timeline constraints] +- [Resource constraints] +- [Compatibility requirements] + +### Requirements +- [Must support X] +- [Must perform within Y budget] +- [Must integrate with Z] + +## Decision + +[The specific technical decision made, described in enough detail for someone +to implement it.] + +### Architecture Diagram +[ASCII diagram or description of the system architecture this creates] + +### Key Interfaces +[API contracts or interface definitions this decision creates] + +## Alternatives Considered + +### Alternative 1: [Name] +- **Description**: [How this would work] +- **Pros**: [Advantages] +- **Cons**: [Disadvantages] +- **Rejection Reason**: [Why this was not chosen] + +### Alternative 2: [Name] +- **Description**: [How this would work] +- **Pros**: [Advantages] +- **Cons**: [Disadvantages] +- **Rejection Reason**: [Why this was not chosen] + +## Consequences + +### Positive +- [Good outcomes of this decision] + +### Negative +- [Trade-offs and costs accepted] + +### Risks +- [Things that could go wrong] +- [Mitigation for each risk] + +## GDD Requirements Addressed + +| GDD System | Requirement | How This ADR Addresses It | +|------------|-------------|--------------------------| +| [system-name].md | [specific rule, formula, or performance constraint from that GDD] | [how this decision satisfies it] | + +## Performance Implications +- **CPU**: [Expected impact] +- **Memory**: [Expected impact] +- **Load Time**: [Expected impact] +- **Network**: [Expected impact, if applicable] + +## Migration Plan +[If this changes existing code, how do we get from here to there?] + +## Validation Criteria +[How will we know this decision was correct? What metrics or tests?] + +## Related Decisions +- [Links to related ADRs] +- [Links to related design documents] +``` + +4.5. 
**Engine Specialist Validation** — Before saving, spawn the **primary engine specialist** via Task to validate the drafted ADR: + - Read `.claude/docs/technical-preferences.md` `Engine Specialists` section to get the primary specialist + - If no engine is configured (`[TO BE CONFIGURED]`), skip this step + - Spawn `subagent_type: [primary specialist]` with: the ADR's Engine Compatibility section, Decision section, Key Interfaces, and the engine reference docs path. Ask them to: + 1. Confirm the proposed approach is idiomatic for the pinned engine version + 2. Flag any APIs or patterns that are deprecated or changed post-training-cutoff + 3. Identify engine-specific risks or gotchas not captured in the current ADR draft + - If the specialist identifies a **blocking issue** (wrong API, deprecated approach, engine version incompatibility): revise the Decision and Engine Compatibility sections accordingly, then confirm the changes with the user before proceeding + - If the specialist finds **minor notes** only: incorporate them into the ADR's Risks subsection + +**Review mode check** — apply before spawning TD-ADR: +- `solo` → skip. Note: "TD-ADR skipped — Solo mode." Proceed to Step 4.7 (GDD sync check). +- `lean` → skip (not a PHASE-GATE). Note: "TD-ADR skipped — Lean mode." Proceed to Step 4.7 (GDD sync check). +- `full` → spawn as normal. + +4.6. **Technical Director Strategic Review** — After the engine specialist validation, spawn `technical-director` via Task using gate **TD-ADR** (`.claude/docs/director-gates.md`): + - Pass: the ADR file path (or draft content), engine version, domain, any existing ADRs in the same domain + - The TD validates architectural coherence (is this decision consistent with the whole system?) — distinct from the engine specialist's API-level check + - If CONCERNS or REJECT: revise the Decision or Alternatives sections accordingly before proceeding + +4.7. 
**GDD Sync Check** — Before presenting the write approval, scan all GDDs +referenced in the "GDD Requirements Addressed" section for naming inconsistencies +with the ADR's Key Interfaces and Decision sections (renamed signals, API methods, +or data types). If any are found, surface them as a **prominent warning block** +immediately before the write approval — not as a footnote: + +``` +⚠️ GDD SYNC REQUIRED +[gdd-filename].md uses names this ADR has renamed: + [old_name] → [new_name_from_adr] + [old_name_2] → [new_name_2_from_adr] +The GDD must be updated before or alongside writing this ADR to prevent +developers reading the GDD from implementing the wrong interface. +``` + +If no inconsistencies: skip this block silently. + +5. **Write approval** — Use `AskUserQuestion`: + +If GDD sync issues were found: +- "ADR draft is complete. How would you like to proceed?" + - [A] Write ADR + update GDD in the same pass + - [B] Write ADR only — I'll update the GDD manually + - [C] Not yet — I need to review further + +If no GDD sync issues: +- "ADR draft is complete. May I write it?" + - [A] Write ADR to `docs/architecture/adr-[NNNN]-[slug].md` + - [B] Not yet — I need to review further + +If yes to any write option, write the file, creating the directory if needed. +For option [A] with GDD update: also update the GDD file(s) to use the new names. + +6. 
**Update Architecture Registry** + +Scan the written ADR for new architectural stances that should be registered: +- State it claims ownership of +- Interface contracts it defines (signal signatures, method APIs) +- Performance budget it claims +- API choices it makes explicitly +- Patterns it bans (Consequences → Negative or explicit "do not use X") + +Present candidates: +``` +Registry candidates from this ADR: + NEW state ownership: player_stamina → stamina-system + NEW interface contract: stamina_depleted signal + NEW performance budget: stamina-system: 0.5ms/frame + NEW forbidden pattern: polling stamina each frame (use signal instead) + EXISTING (referenced_by update only): player_health → already registered ✅ +``` + +**Registry append logic**: When writing to `docs/registry/architecture.yaml`, do NOT assume sections are empty. The file may already have entries from previous ADRs written in this session. Before each Edit call: +1. Read the current state of `docs/registry/architecture.yaml` +2. Find the correct section (state_ownership, interfaces, forbidden_patterns, api_decisions) +3. Append the new entry AFTER the last existing entry in that section — do not try to replace a `[]` placeholder that may no longer exist +4. If the section has entries already, use the closing content of the last entry as the `old_string` anchor, and append the new entry after it + +**BLOCKING — do not write to `docs/registry/architecture.yaml` without explicit user approval.** + +Ask using `AskUserQuestion`: +- "May I update `docs/registry/architecture.yaml` with these [N] new stances?" + - Options: "Yes — update the registry", "Not yet — I want to review the candidates", "Skip registry update" + +Only proceed if the user selects yes. If yes: append new entries. Never modify existing entries — if a stance is +changing, set the old entry to `status: superseded_by: ADR-[NNNN]` and add the new entry. + +--- + +## 7. 
Closing Next Steps + +After the ADR is written (and registry optionally updated), close with `AskUserQuestion`. + +Before generating the widget: +1. Read `docs/registry/architecture.yaml` — check if any priority ADRs are still unwritten (look for ADRs flagged in technical-preferences.md or systems-index.md as prerequisites) +2. Check if all prerequisite ADRs are now written. If yes, include a "Start writing GDDs" option. +3. List ALL remaining priority ADRs as individual options — not just the next one or two. + +Widget format: +``` +ADR-[NNNN] written and registry updated. What would you like to do next? +[1] Write [next-priority-adr-name] — [brief description from prerequisites list] +[2] Write [another-priority-adr] — [brief description] (include ALL remaining ones) +[N] Start writing GDDs — run `/design-system [first-undesigned-system]` (only show if all prerequisite ADRs are written) +[N+1] Stop here for this session +``` + +If there are no remaining priority ADRs and no undesigned GDD systems, offer only "Stop here" and suggest running `/architecture-review` in a fresh session. + +**Always include this fixed notice in the closing output (do NOT omit it):** + +> To validate ADR coverage against your GDDs, open a **fresh Claude Code session** +> and run `/architecture-review`. +> +> **Never run `/architecture-review` in the same session as `/architecture-decision`.** +> The reviewing agent must be independent of the authoring context to give an unbiased +> assessment. Running it here would invalidate the review. + +Update any stories that were `Status: Blocked` pending this ADR to `Status: Ready`. diff --git a/.omc/skills/architecture-review/SKILL.md b/.omc/skills/architecture-review/SKILL.md new file mode 100644 index 0000000..381cacd --- /dev/null +++ b/.omc/skills/architecture-review/SKILL.md @@ -0,0 +1,639 @@ +--- +name: architecture-review +description: "Validates completeness and consistency of the project architecture against all GDDs. 
Builds a traceability matrix mapping every GDD technical requirement to ADRs, identifies coverage gaps, detects cross-ADR conflicts, verifies engine compatibility consistency across all decisions, and produces a PASS/CONCERNS/FAIL verdict. The architecture equivalent of /design-review." +argument-hint: "[focus: full | coverage | consistency | engine | single-gdd path/to/gdd.md]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task, AskUserQuestion +agent: technical-director +model: opus +--- + +# Architecture Review + +The architecture review validates that the complete body of architectural decisions +covers all game design requirements, is internally consistent, and correctly targets +the project's pinned engine version. It is the quality gate between Technical Setup +and Pre-Production. + +**Argument modes:** +- **No argument / `full`**: Full review — all phases +- **`coverage`**: Traceability only — which GDD requirements have no ADR +- **`consistency`**: Cross-ADR conflict detection only +- **`engine`**: Engine compatibility audit only +- **`single-gdd [path]`**: Review architecture coverage for one specific GDD +- **`rtm`**: Requirements Traceability Matrix — extends the standard matrix + to include story file paths and test file paths; outputs + `docs/architecture/requirements-traceability.md` with the full + GDD requirement → ADR → Story → Test chain. Use in Production phase when + stories and tests exist. 
+ +--- + +## Phase 1: Load Everything + +### Phase 1a — L0: Summary Scan (fast, low tokens) + +Before reading any full document, use Grep to extract `## Summary` sections +from all GDDs and ADRs: + +``` +Grep pattern="## Summary" glob="design/gdd/*.md" output_mode="content" -A 4 +Grep pattern="## Summary" glob="docs/architecture/adr-*.md" output_mode="content" -A 3 +``` + +For `single-gdd [path]` mode: use the target GDD's summary to identify which +ADRs reference the same system (Grep ADRs for the system name), then full-read +only those ADRs. Skip full-reading unrelated GDDs entirely. + +For `engine` mode: only full-read ADRs — GDDs are not needed for engine checks. + +For `coverage` or `full` mode: proceed to full-read everything below. + +### Phase 1b — L1/L2: Full Document Load + +Read all inputs appropriate to the mode: + +### Design Documents +- All in-scope GDDs in `design/gdd/` — read every file completely +- `design/gdd/systems-index.md` — the authoritative list of systems + +### Architecture Documents +- All in-scope ADRs in `docs/architecture/` — read every file completely +- `docs/architecture/architecture.md` if it exists + +### Engine Reference +- `docs/engine-reference/[engine]/VERSION.md` +- `docs/engine-reference/[engine]/breaking-changes.md` +- `docs/engine-reference/[engine]/deprecated-apis.md` +- All files in `docs/engine-reference/[engine]/modules/` + +### Project Standards +- `.claude/docs/technical-preferences.md` + +Report a count: "Loaded [N] GDDs, [M] ADRs, engine: [name + version]." + +**Also read `docs/consistency-failures.md`** if it exists. Extract entries with +Domain matching the systems under review (Architecture, Engine, or any GDD domain +being covered). Surface recurring patterns as a "Known conflict-prone areas" note +at the top of the Phase 4 conflict detection output. 
+
+---
+
+## Phase 2: Extract Technical Requirements from Every GDD
+
+### Pre-load the TR Registry
+
+Before extracting any requirements, read `docs/architecture/tr-registry.yaml`
+if it exists. Index existing entries by `id` and by normalized `requirement`
+text (lowercase, trimmed). This prevents ID renumbering across review runs.
+
+For each requirement you extract, the matching rule is:
+1. **Exact/near match** to an existing registry entry for the same system →
+ reuse that entry's TR-ID unchanged. Update the `requirement` text in the
+ registry only if the GDD wording changed (same intent, clearer phrasing) —
+ add a `revised: [date]` field.
+2. **No match** → assign a new ID: next available `TR-[system]-NNN` for that
+ system, starting from the highest existing sequence + 1.
+3. **Ambiguous** (partial match, intent unclear) → ask the user:
+ > "Does '[new requirement text]' refer to the same requirement as
+ > `TR-[system]-NNN: [existing text]`, or is it a new requirement?"
+ User answers: "Same requirement" (reuse ID) or "New requirement" (new ID).
+
+For any requirement with `status: deprecated` in the registry — skip it.
+It was removed from the GDD intentionally.
+
+For each GDD, read it and extract all **technical requirements** — things the
+architecture must provide for the system to work. A technical requirement is any
+statement that implies a specific architectural decision.
+ +Categories to extract: + +| Category | Example | +|----------|---------| +| **Data structures** | "Each entity has health, max health, status effects" → needs a component/data schema | +| **Performance constraints** | "Collision detection must run at 60fps with 200 entities" → physics budget ADR | +| **Engine capability** | "Inverse kinematics for character animation" → IK system ADR | +| **Cross-system communication** | "Damage system notifies UI and audio simultaneously" → event/signal architecture ADR | +| **State persistence** | "Player progress persists between sessions" → save system ADR | +| **Threading/timing** | "AI decisions happen off the main thread" → concurrency ADR | +| **Platform requirements** | "Supports keyboard, gamepad, touch" → input system ADR | + +For each GDD, produce a structured list: + +``` +GDD: [filename] +System: [system name] +Technical Requirements: + TR-[GDD]-001: [requirement text] → Domain: [Physics/Rendering/etc] + TR-[GDD]-002: [requirement text] → Domain: [...] +``` + +This becomes the **requirements baseline** — the complete set of what the +architecture must cover. + +--- + +## Phase 3: Build the Traceability Matrix + +For each technical requirement extracted in Phase 2, search the ADRs: + +1. Read every ADR's "GDD Requirements Addressed" section +2. Check if it explicitly references the requirement or its GDD +3. Check if the ADR's decision text implicitly covers the requirement +4. 
Mark coverage status: + +| Status | Meaning | +|--------|---------| +| ✅ **Covered** | An ADR explicitly addresses this requirement | +| ⚠️ **Partial** | An ADR partially covers this, or coverage is ambiguous | +| ❌ **Gap** | No ADR addresses this requirement | + +Build the full matrix: + +``` +## Traceability Matrix + +| Requirement ID | GDD | System | Requirement | ADR Coverage | Status | +|---------------|-----|--------|-------------|--------------|--------| +| TR-combat-001 | combat.md | Combat | Hitbox detection < 1 frame | ADR-0003 | ✅ | +| TR-combat-002 | combat.md | Combat | Combo window timing | — | ❌ GAP | +| TR-inventory-001 | inventory.md | Inventory | Persistent item storage | ADR-0005 | ✅ | +``` + +Count the totals: X covered, Y partial, Z gaps. + +--- + +## Phase 3b: Story and Test Linkage (RTM mode only) + +*Skip this phase unless the argument is `rtm` or `full` with stories present.* + +This phase extends the Phase 3 matrix to include the story that implements +each requirement and the test that verifies it — producing the full +Requirements Traceability Matrix (RTM). + +### Step 3b-1 — Load stories + +Glob `production/epics/**/*.md` (excluding EPIC.md index files). For each +story file: +- Extract `TR-ID` from the story's Context section +- Extract story file path, title, Status +- Extract `## Test Evidence` section — the stated test file path + +### Step 3b-2 — Load test files + +Glob `tests/unit/**/*_test.*` and `tests/integration/**/*_test.*`. +Build an index: system → [test file paths]. + +For each test file path from Step 3b-1, confirm via Glob whether the file +actually exists. Note MISSING if the stated path does not exist. 
+ +### Step 3b-3 — Build the extended RTM + +For each TR-ID in the Phase 3 matrix, add: +- **Story**: the story file path(s) that reference this TR-ID (may be multiple) +- **Test File**: the test file path stated in the story's Test Evidence section +- **Test Status**: COVERED (test file exists) / MISSING (path stated but not + found) / NONE (no test path stated, story type may be Visual/Feel/UI) / + NO STORY (requirement has no story yet — pre-production gap) + +Extended matrix format: + +``` +## Requirements Traceability Matrix (RTM) + +| TR-ID | GDD | Requirement | ADR | Story | Test File | Test Status | +|-------|-----|-------------|-----|-------|-----------|-------------| +| TR-combat-001 | combat.md | Hitbox < 1 frame | ADR-0003 | story-001-hitbox.md | tests/unit/combat/hitbox_test.gd | COVERED | +| TR-combat-002 | combat.md | Combo window | — | story-002-combo.md | — | NONE (Visual/Feel) | +| TR-inventory-001 | inventory.md | Persistent storage | ADR-0005 | — | — | NO STORY | +``` + +RTM coverage summary: +- COVERED: [N] — requirements with ADR + story + passing test +- MISSING test: [N] — story exists but test file not found +- NO STORY: [N] — requirements with ADR but no story yet +- NO ADR: [N] — requirements without architectural coverage (from Phase 3 gaps) +- Full chain complete (COVERED): [N/total] ([%]) + +--- + +## Phase 4: Cross-ADR Conflict Detection + +Compare every ADR against every other ADR to detect contradictions. 
A conflict +exists when: + +- **Data ownership conflict**: Two ADRs claim exclusive ownership of the same data +- **Integration contract conflict**: ADR-A assumes System X has interface Y, but + ADR-B defines System X with a different interface +- **Performance budget conflict**: ADR-A allocates N ms to physics, ADR-B allocates + N ms to AI, together they exceed the total frame budget +- **Dependency cycle**: ADR-A says System X initialises before Y; ADR-B says Y + initialises before X +- **Architecture pattern conflict**: ADR-A uses event-driven communication for a + subsystem; ADR-B uses direct function calls to the same subsystem +- **State management conflict**: Two ADRs define authority over the same game state + (e.g. both Combat ADR and Character ADR claim to own the health value) + +For each conflict found: + +``` +## Conflict: [ADR-NNNN] vs [ADR-MMMM] +Type: [Data ownership / Integration / Performance / Dependency / Pattern / State] +ADR-NNNN claims: [...] +ADR-MMMM claims: [...] +Impact: [What breaks if both are implemented as written] +Resolution options: + 1. [Option A] + 2. [Option B] +``` + +### ADR Dependency Ordering + +After conflict detection, analyse the dependency graph across all ADRs: + +1. **Collect all `Depends On` fields** from every ADR's "ADR Dependencies" section +2. **Topological sort**: Determine the correct implementation order — ADRs with no + dependencies come first (Foundation), ADRs that depend on those come next, etc. +3. **Flag unresolved dependencies**: If ADR-A's "Depends On" field references an ADR + that is still `Proposed` or does not exist, flag it: + ``` + ⚠️ ADR-0005 depends on ADR-0002 — but ADR-0002 is still Proposed. + ADR-0005 cannot be safely implemented until ADR-0002 is Accepted. + ``` +4. 
**Cycle detection**: If ADR-A depends on ADR-B and ADR-B depends on ADR-A (directly + or transitively), flag it as a `DEPENDENCY CYCLE`: + ``` + 🔴 DEPENDENCY CYCLE: ADR-0003 → ADR-0006 → ADR-0003 + This cycle must be broken before either can be implemented. + ``` +5. **Output recommended implementation order**: + ``` + ### Recommended ADR Implementation Order (topologically sorted) + Foundation (no dependencies): + 1. ADR-0001: [title] + 2. ADR-0003: [title] + Depends on Foundation: + 3. ADR-0002: [title] (requires ADR-0001) + 4. ADR-0005: [title] (requires ADR-0003) + Feature layer: + 5. ADR-0004: [title] (requires ADR-0002, ADR-0005) + ``` + +--- + +## Phase 5: Engine Compatibility Cross-Check + +Across all ADRs, check for engine consistency: + +### Version Consistency +- Do all ADRs that mention an engine version agree on the same version? +- If any ADR was written for an older engine version, flag it as potentially stale + +### Post-Cutoff API Consistency +- Collect all "Post-Cutoff APIs Used" fields from all ADRs +- For each, verify against the relevant module reference doc +- Check that no two ADRs make contradictory assumptions about the same post-cutoff API + +### Deprecated API Check +- Grep all ADRs for API names listed in `deprecated-apis.md` +- Flag any ADR referencing a deprecated API + +### Missing Engine Compatibility Sections +- List all ADRs that are missing the Engine Compatibility section entirely +- These are blind spots — their engine assumptions are unknown + +Output format: +``` +### Engine Audit Results +Engine: [name + version] +ADRs with Engine Compatibility section: X / Y total + +Deprecated API References: + - ADR-0002: uses [deprecated API] — deprecated since [version] + +Stale Version References: + - ADR-0001: written for [older version] — current project version is [version] + +Post-Cutoff API Conflicts: + - ADR-0004 and ADR-0007 both use [API] with incompatible assumptions +``` + +--- + +### Engine Specialist Consultation + +After 
completing the engine audit above, spawn the **primary engine specialist** via Task for a domain-expert second opinion: +- Read `.claude/docs/technical-preferences.md` `Engine Specialists` section to get the primary specialist +- If no engine is configured, skip this consultation +- Spawn `subagent_type: [primary specialist]` with: all ADRs that contain engine-specific decisions or `Post-Cutoff APIs Used` fields, the engine reference docs, and the Phase 5 audit findings. Ask them to: + 1. Confirm or challenge each audit finding — specialists may know of engine nuances not captured in the reference docs + 2. Identify engine-specific anti-patterns in the ADRs that the audit may have missed (e.g., using the wrong Godot node type, Unity component coupling, Unreal subsystem misuse) + 3. Flag ADRs that make assumptions about engine behaviour that differ from the actual pinned version + +Incorporate additional findings under `### Engine Specialist Findings` in the Phase 5 output. These feed into the final verdict — specialist-identified issues carry the same weight as audit-identified issues. + +--- + +## Phase 5b: Design Revision Flags (Architecture → GDD Feedback) + +For each **HIGH RISK engine finding** from Phase 5, check whether any GDD makes an +assumption that the verified engine reality contradicts. + +Specific cases to check: + +1. **Post-cutoff API behaviour differs from training-data assumptions**: If an ADR + records a verified API behaviour that differs from the default LLM assumption, + check all GDDs that reference the related system. Look for design rules written + around the old (assumed) behaviour. + +2. **Known engine limitations in ADRs**: If an ADR records a known engine limitation + (e.g. "Jolt ignores HingeJoint3D damp", "D3D12 is now the default backend"), check + GDDs that design mechanics around the affected feature. + +3. 
**Deprecated API conflicts**: If Phase 5 flagged a deprecated API used in an ADR, + check whether any GDD contains mechanics that assume the deprecated API's behaviour. + +For each conflict found, record it in the GDD Revision Flags table: + +``` +### GDD Revision Flags (Architecture → Design Feedback) +These GDD assumptions conflict with verified engine behaviour or accepted ADRs. +The GDD should be revised before its system enters implementation. + +| GDD | Assumption | Reality (from ADR/engine-reference) | Action | +|-----|-----------|--------------------------------------|--------| +| combat.md | "Use HingeJoint3D damp for weapon recoil" | Jolt ignores damp — ADR-0003 | Revise GDD | +``` + +If no revision flags are found, write: "No GDD revision flags — all GDD assumptions +are consistent with verified engine behaviour." + +Ask: "Should I flag these GDDs for revision in the systems index?" +- If yes: update the relevant systems' Status field to "Needs Revision" + and add a short inline note in the adjacent Notes/Description column explaining the conflict. + Ask for approval before writing. + (Do NOT use parentheticals like "Needs Revision (Architecture Feedback)" — other skills + match the exact string "Needs Revision" and parentheticals break that match.) + +--- + +## Phase 6: Architecture Document Coverage + +If `docs/architecture/architecture.md` exists, validate it against GDDs: + +- Does every system from `systems-index.md` appear in the architecture layers? +- Does the data flow section cover all cross-system communication defined in GDDs? +- Do the API boundaries support all integration requirements from GDDs? +- Are there systems in the architecture doc that have no corresponding GDD + (orphaned architecture)? 
+ +--- + +## Phase 7: Output the Review Report + +``` +## Architecture Review Report +Date: [date] +Engine: [name + version] +GDDs Reviewed: [N] +ADRs Reviewed: [M] + +--- + +### Traceability Summary +Total requirements: [N] +✅ Covered: [X] +⚠️ Partial: [Y] +❌ Gaps: [Z] + +### Coverage Gaps (no ADR exists) +For each gap: + ❌ TR-[id]: [GDD] → [system] → [requirement] + Suggested ADR: "/architecture-decision [suggested title]" + Domain: [Physics/Rendering/etc] + Engine Risk: [LOW/MEDIUM/HIGH] + +### Cross-ADR Conflicts +[List all conflicts from Phase 4] + +### ADR Dependency Order +[Topologically sorted implementation order from Phase 4 — dependency ordering section] +[Unresolved dependencies and cycles if any] + +### GDD Revision Flags +[GDD assumptions that conflict with verified engine behaviour — from Phase 5b] +[Or: "None — all GDD assumptions consistent with verified engine behaviour"] + +### Engine Compatibility Issues +[List all engine issues from Phase 5] + +### Architecture Document Coverage +[List missing systems and orphaned architecture from Phase 6] + +--- + +### Verdict: [PASS / CONCERNS / FAIL] + +PASS: All requirements covered, no conflicts, engine consistent +CONCERNS: Some gaps or partial coverage, but no blocking conflicts +FAIL: Critical gaps (Foundation/Core layer requirements uncovered), + or blocking cross-ADR conflicts detected + +### Blocking Issues (must resolve before PASS) +[List items that must be resolved — FAIL verdict only] + +### Required ADRs +[Prioritised list of ADRs to create, most foundational first] +``` + +--- + +## Phase 8: Write and Update Traceability Index + +Use `AskUserQuestion` for the write approval: +- "Review complete. What would you like to write?" 
+ - [A] Write all three files (review report + traceability index + TR registry)
+ - [B] Write review report only — `docs/architecture/architecture-review-[date].md`
+ - [C] Don't write anything yet — I need to review the findings first
+
+### RTM Output (rtm mode only)
+
+For `rtm` mode, additionally ask: "May I write the full Requirements Traceability
+Matrix to `docs/architecture/requirements-traceability.md`?"
+
+RTM file format:
+
+```markdown
+# Requirements Traceability Matrix (RTM)
+
+> Last Updated: [date]
+> Mode: /architecture-review rtm
+> Coverage: [N]% full chain complete (GDD → ADR → Story → Test)
+
+## How to read this matrix
+
+| Column | Meaning |
+|--------|---------|
+| TR-ID | Stable requirement ID from tr-registry.yaml |
+| GDD | Source design document |
+| ADR | Architectural decision governing implementation |
+| Story | Story file that implements this requirement |
+| Test File | Automated test file path |
+| Test Status | COVERED / MISSING / NO STORY / NO ADR |
+
+## Full Traceability Matrix
+
+| TR-ID | GDD | Requirement | ADR | Story | Test File | Status |
+|-------|-----|-------------|-----|-------|-----------|--------|
+[Full matrix rows from Phase 3b]
+
+## Coverage Summary
+
+| Status | Count | % |
+|--------|-------|---|
+| COVERED — full chain complete | [N] | [%] |
+| MISSING test — story exists, no test | [N] | [%] |
+| NO STORY — ADR exists, not yet implemented | [N] | [%] |
+| NO ADR — architectural gap | [N] | [%] |
+| **Total requirements** | **[N]** | **100%** |
+
+## Uncovered Requirements (Priority Fix List)
+
+Requirements where the full chain is broken, prioritised by layer:
+
+### Foundation layer gaps
+[list with suggested action per gap]
+
+### Core layer gaps
+[list]
+
+### Feature / Presentation layer gaps
+[list — lower priority]
+
+## History
+
+| Date | Full Chain % | Notes |
+|------|-------------|-------|
+| [date] | [%] | Initial RTM |
+```
+
+### TR Registry Update
+
+Also ask: "May I update `docs/architecture/tr-registry.yaml` with new requirement
+IDs from this review?"
+
+If yes:
+- **Append** any new TR-IDs that weren't in the registry before this review
+- **Update** `requirement` text and `revised` date for any entries whose GDD
+ wording changed (ID stays the same)
+- **Mark** `status: deprecated` for any registry entries whose GDD requirement
+ no longer exists (confirm with user before marking deprecated)
+- **Never** renumber or delete existing entries
+- Update the `last_updated` and `version` fields at the top
+
+This ensures all future story files can reference stable TR-IDs that persist
+across every subsequent architecture review.
+
+### Reflexion Log Update
+
+After writing the review report, append any 🔴 CONFLICT entries found in Phase 4
+to `docs/consistency-failures.md` (if the file exists):
+
+```markdown
+### [YYYY-MM-DD] — /architecture-review — 🔴 CONFLICT
+**Domain**: Architecture / [specific domain e.g. State Ownership, Performance]
+**Documents involved**: [ADR-NNNN] vs [ADR-MMMM]
+**What happened**: [specific conflict — what each ADR claims]
+**Resolution**: [how it was or should be resolved]
+**Pattern**: [generalised lesson for future ADR authors in this domain]
+```
+
+Only append CONFLICT entries — do not log GAP entries (missing ADRs are expected
+before the architecture is complete). Do not create the file if missing — only
+append when it already exists.
+ +### Session State Update + +After writing all approved files, silently append to +`production/session-state/active.md`: + + ## Session Extract — /architecture-review [date] + - Verdict: [PASS / CONCERNS / FAIL] + - Requirements: [N] total — [X] covered, [Y] partial, [Z] gaps + - New TR-IDs registered: [N, or "None"] + - GDD revision flags: [comma-separated GDD names, or "None"] + - Top ADR gaps: [top 3 gap titles from the report, or "None"] + - Report: docs/architecture/architecture-review-[date].md + +If `active.md` does not exist, create it with this block as the initial content. +Confirm in conversation: "Session state updated." + +The traceability index format: + +```markdown +# Architecture Traceability Index +Last Updated: [date] +Engine: [name + version] + +## Coverage Summary +- Total requirements: [N] +- Covered: [X] ([%]) +- Partial: [Y] +- Gaps: [Z] + +## Full Matrix +[Complete traceability matrix from Phase 3] + +## Known Gaps +[All ❌ items with suggested ADRs] + +## Superseded Requirements +[Requirements whose GDD was changed after the ADR was written] +``` + +--- + +## Phase 9: Handoff + +After completing the review and writing approved files, present: + +1. **Immediate actions**: List the top 3 ADRs to create (highest-impact gaps first, + Foundation layer before Feature layer) +2. **Gate guidance**: "When all blocking issues are resolved, run `/gate-check + pre-production` to advance" +3. **Rerun trigger**: "Re-run `/architecture-review` after each new ADR is written + to verify coverage improves" + +Then close with `AskUserQuestion`: +- "Architecture review complete. What would you like to do next?" + - [A] Write a missing ADR — open a fresh session and run `/architecture-decision [system]` + - [B] Run `/gate-check pre-production` — if all blocking gaps are resolved + - [C] Stop here for this session + +--- + +## Error Recovery Protocol + +If any spawned agent returns BLOCKED, errors, or fails to complete: + +1. 
**Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" before continuing +2. **Assess dependencies**: If the blocked agent's output is required by a later phase, do not proceed past that phase without user input +3. **Offer options** via AskUserQuestion with three choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope (fewer GDDs, single-system focus) + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed so work is not lost + +--- + +## Collaborative Protocol + +1. **Read silently** — do not narrate every file read +2. **Show the matrix** — present the full traceability matrix before asking for + anything; let the user see the state +3. **Don't guess** — if a requirement is ambiguous, ask: "Is [X] a technical + requirement or a design preference?" +4. **Ask before writing** — always confirm before writing the report file +5. **Non-blocking** — the verdict is advisory; the user decides whether to continue + despite CONCERNS or even FAIL findings diff --git a/.omc/skills/art-bible/SKILL.md b/.omc/skills/art-bible/SKILL.md new file mode 100644 index 0000000..8a3c740 --- /dev/null +++ b/.omc/skills/art-bible/SKILL.md @@ -0,0 +1,248 @@ +--- +name: art-bible +description: "Guided, section-by-section Art Bible authoring. Creates the visual identity specification that gates all asset production. Run after /brainstorm is approved and before /map-systems or any GDD authoring begins." +argument-hint: "[--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion +--- + +## Phase 0: Parse Arguments and Context Check + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. 
+ 
+Read `design/gdd/game-concept.md`. If it does not exist, fail with:
+> "No game concept found. Run `/brainstorm` first — the art bible is authored after the game concept is approved."
+
+Extract from game-concept.md:
+- Game title (working title)
+- Core fantasy and elevator pitch
+- Game pillars (all of them)
+- **Visual Identity Anchor** section if present (from brainstorm Phase 4 art-director output)
+- Target platform (if noted)
+
+**Retrofit mode detection**: Glob `design/art/art-bible.md`. If the file exists:
+- Read it in full
+- For each of the 9 sections, check whether the body contains real content (more than a `[To be designed]` placeholder or similar) vs. is empty/placeholder
+- Build a section status table (section names match the sections authored in Phases 2–4 below):
+
+```
+Section | Status
+--------|--------
+1. Visual Identity Statement | [Complete / Empty / Placeholder]
+2. Mood & Atmosphere | ...
+3. Shape Language | ...
+4. Color System | ...
+5. Character Design Direction | ...
+6. Environment Design Language | ...
+7. UI/HUD Visual Direction | ...
+8. Asset Standards | ...
+9. Reference Direction | ...
+```
+
+- Present this table to the user:
+ > "Found existing art bible at `design/art/art-bible.md`. [N] sections are complete, [M] need content. I'll work on the incomplete sections only — existing content will not be touched."
+- Only work on sections with Status: Empty or Placeholder. Do not re-author sections that are already complete.
+
+If the file does not exist, this is a fresh authoring session — proceed normally.
+
+Read `.claude/docs/technical-preferences.md` if it exists — extract performance budgets and engine for asset standard constraints.
+
+---
+
+## Phase 1: Framing
+
+Present the session context and ask two questions before authoring anything:
+
+Use `AskUserQuestion` with two tabs:
+- Tab **"Scope"** — "Which sections need to be authored today?"
+ Options: `Full bible — all 9 sections` / `Visual identity core (sections 1–4 only)` / `Asset standards only (section 8)` / `Resume — fill in missing sections` +- Tab **"References"** — "Do you have reference games, films, or art that define the visual direction?" + (Free text — let the user type specific titles. Do NOT preset options here.) + +If the game-concept.md has a Visual Identity Anchor section, note it: +> "Found a visual identity anchor from brainstorm: '[anchor name] — [one-line rule]'. I'll use this as the foundation for the art bible." + +--- + +## Phase 2: Visual Identity Foundation (Sections 1–4) + +These four sections define the core visual language. **All other sections flow from them.** Author and write each to file before moving to the next. + +### Section 1: Visual Identity Statement + +**Goal**: A one-line visual rule plus 2–3 supporting principles that resolve visual ambiguity. + +If a visual anchor exists from game-concept.md: present it and ask: +- "Build directly from this anchor?" +- "Revise it before expanding?" +- "Start fresh with new options?" + +**Agent delegation (MANDATORY)**: Spawn `art-director` via Task: +- Provide: game concept (elevator pitch, core fantasy), full pillar set, platform target, any reference games/art from Phase 1 framing, the visual anchor if it exists +- Ask: "Draft a Visual Identity Statement for this game. Provide: (1) a one-line visual rule that could resolve any visual decision ambiguity, (2) 2–3 supporting visual principles, each with a one-sentence design test ('when X is ambiguous, this principle says choose Y'). Anchor all principles directly in the stated pillars — each principle must serve a specific pillar." + +Present the art-director's draft to the user. Use `AskUserQuestion`: +- Options: `[A] Lock this in` / `[B] Revise the one-liner` / `[C] Revise a supporting principle` / `[D] Describe my own direction` + +Write the approved section to file immediately. 
+ +### Section 2: Mood & Atmosphere + +**Goal**: Emotional targets by game state — specific enough for a lighting artist to work from. + +For each major game state (e.g., exploration, combat, victory, defeat, menus — adapt to this game's states), define: +- Primary emotion/mood target +- Lighting character (time of day, color temperature, contrast level) +- Atmospheric descriptors (3–5 adjectives) +- Energy level (frenetic / measured / contemplative / etc.) + +**Agent delegation**: Spawn `art-director` via Task with the Visual Identity Statement and pillar set. Ask: "Define mood and atmosphere targets for each major game state in this game. Be specific — 'dark and foreboding' is not enough. Name the exact emotional target, the lighting character (warm/cool, high/low contrast, time of day direction), and at least one visual element that carries the mood. Each game state must feel visually distinct from the others." + +Write the approved section to file immediately. + +### Section 3: Shape Language + +**Goal**: The geometric vocabulary that makes this game's world visually coherent and distinguishable. + +Cover: +- Character silhouette philosophy (how readable at thumbnail size? Distinguishing trait per archetype?) +- Environment geometry (angular/curved/organic/geometric — which dominates and why?) +- UI shape grammar (does UI echo the world aesthetic, or is it a distinct HUD language?) +- Hero shapes vs. supporting shapes (what draws the eye, what recedes?) + +**Agent delegation**: Spawn `art-director` via Task with Visual Identity Statement and mood targets. Ask: "Define the shape language for this game. Connect each shape principle back to the visual identity statement and a specific game pillar. Explain what these shape choices communicate to the player emotionally." + +Write the approved section to file immediately. + +### Section 4: Color System + +**Goal**: A complete, producible palette system that serves both aesthetic and communication needs. 
+ +Cover: +- Primary palette (5–7 colors with roles — not just hex codes, but what each color means in this world) +- Semantic color usage (what does red communicate? Gold? Blue? White? Establish the color vocabulary) +- Per-biome or per-area color temperature rules (if the game has distinct areas) +- UI palette (may differ from world palette — define the divergence explicitly) +- Colorblind safety: which semantic colors need shape/icon/sound backup + +**Agent delegation**: Spawn `art-director` via Task with Visual Identity Statement and mood targets. Ask: "Design the color system for this game. Every semantic color assignment must be explained — why does this color mean danger/safety/reward in this world? Identify which color pairs might fail colorblind players and specify what backup cues are needed." + +Write the approved section to file immediately. + +--- + +## Phase 3: Production Guides (Sections 5–8) + +These sections translate the visual identity into concrete production rules. They should be specific enough that an outsourcing team can follow them without additional briefing. + +### Section 5: Character Design Direction + +**Agent delegation**: Spawn `art-director` via Task with sections 1–4. Ask: "Define character design direction for this game. Cover: visual archetype for the player character (if any), distinguishing feature rules per character type (how do players tell enemies/NPCs/allies apart at a glance?), expression/pose style targets (stiff/expressive/realistic/exaggerated), and LOD philosophy (how much detail is preserved at game camera distance?)." + +Write the approved section to file. + +### Section 6: Environment Design Language + +**Agent delegation**: Spawn `art-director` via Task with sections 1–4. Ask: "Define the environment design language for this game. Cover: architectural style and its relationship to the world's culture/history, texture philosophy (painted vs. PBR vs. 
stylized — why this choice for this game?), prop density rules (sparse/dense — what drives the choice per area type?), and environmental storytelling guidelines (what visual details should tell the story without text?)." + +Write the approved section to file. + +### Section 7: UI/HUD Visual Direction + +**Agent delegation**: Spawn in parallel: +- **`art-director`**: Visual style for UI — diegetic vs. screen-space HUD, typography direction (font personality, weight, size hierarchy), iconography style (flat/outlined/illustrated/photorealistic), animation feel for UI elements +- **`ux-designer`**: UX alignment check — does the visual direction support the interaction patterns this game requires? Flag any conflicts between art direction and readability/accessibility needs. + +Collect both. If they conflict (e.g., art-director wants elaborate diegetic UI but ux-designer flags it would reduce combat readability), surface the conflict explicitly with both positions. Do NOT silently resolve — use `AskUserQuestion` to let the user decide. + +Write the approved section to file. + +### Section 8: Asset Standards + +**Agent delegation**: Spawn in parallel: +- **`art-director`**: File format preferences, naming convention direction, texture resolution tiers, LOD level expectations, export settings philosophy +- **`technical-artist`**: Engine-specific hard constraints — poly count budgets per asset category, texture memory limits, material slot counts, importer constraints, anything from the performance budgets in `.claude/docs/technical-preferences.md` + +If any art preference conflicts with a technical constraint (e.g., art-director wants 4K textures but performance budget requires 2K for mobile), resolve the conflict explicitly — note both the ideal and the constrained standard, and explain the tradeoff. Ambiguity in asset standards is where production costs are born. + +Write the approved section to file. 
+ +--- + +## Phase 4: Reference Direction (Section 9) + +**Goal**: A curated reference set that is specific about what to take and what to avoid from each source. + +**Agent delegation**: Spawn `art-director` via Task with the completed sections 1–8. Ask: "Compile a reference direction for this game. Provide 3–5 reference sources (games, films, art styles, or specific artists). For each: name it, specify exactly what visual element to draw from it (not 'the general aesthetic' — a specific technique, color choice, or compositional rule), and specify what to explicitly avoid or diverge from (to prevent the 'trying to copy X' reading). References should be additive — no two references should be pointing in exactly the same direction." + +Write the approved section to file. + +--- + +## Phase 5: Art Director Sign-Off + +**Review mode check** — apply before spawning AD-ART-BIBLE: +- `solo` → skip. Note: "AD-ART-BIBLE skipped — Solo mode." Proceed to Phase 6. +- `lean` → skip (not a PHASE-GATE). Note: "AD-ART-BIBLE skipped — Lean mode." Proceed to Phase 6. +- `full` → spawn as normal. + +After all sections are complete (or the scoped set from Phase 1 is complete), spawn `creative-director` via Task using gate **AD-ART-BIBLE** (`.claude/docs/director-gates.md`). + +Pass: art bible file path, game pillars, visual identity anchor. + +Handle verdict per standard rules in `director-gates.md`. Record the verdict in the art bible's status header: +`> **Art Director Sign-Off (AD-ART-BIBLE)**: APPROVED [date] / CONCERNS (accepted) [date] / REVISED [date]` + +--- + +## Phase 6: Close + +Before presenting next steps, check project state: +- Does `design/gdd/systems-index.md` exist? → map-systems is done, skip that option +- Does `.claude/docs/technical-preferences.md` contain a configured engine (not `[TO BE CONFIGURED]`)? → setup-engine is done, skip that option +- Does `design/gdd/` contain any `*.md` files? 
→ design-system has been run, skip that option +- Does `design/gdd/gdd-cross-review-*.md` exist? → review-all-gdds is done +- Do GDDs exist (check above)? → include /consistency-check option + +Use `AskUserQuestion` for next steps. Only include options that are genuinely next based on the state check above: + +**Option pool — include only if not already done:** +- `[_] Run /map-systems — decompose the concept into systems before writing GDDs` (skip if systems-index.md exists) +- `[_] Run /setup-engine — configure the engine (asset standards may need revisiting after engine is set)` (skip if engine configured) +- `[_] Run /design-system — start the first GDD` (skip if any GDDs exist) +- `[_] Run /review-all-gdds — cross-GDD consistency check (required before Technical Setup gate)` (skip if gdd-cross-review-*.md exists) +- `[_] Run /asset-spec — generate per-asset visual specs and AI generation prompts from approved GDDs` (include if GDDs exist) +- `[_] Run /consistency-check — scan existing GDDs against the art bible for visual direction conflicts` (include if GDDs exist) +- `[_] Run /create-architecture — author the master architecture document (next Technical Setup step)` +- `[_] Stop here` + +Assign letters A, B, C… only to the options actually included. Mark the most logical pipeline-advancing option as `(recommended)`. + +> **Always include** `/create-architecture` and Stop here as options — these are always valid next steps once the art bible is complete. 
+ +--- + +## Collaborative Protocol + +Every section follows: **Question → Options → Decision → Draft (from art-director agent) → Approval → Write to file** + +- Never draft a section without first spawning the relevant agent(s) +- Write each section to file immediately after approval — do not batch +- Surface all agent disagreements to the user — never silently resolve conflicts between art-director and technical-artist +- The art bible is a constraint document: it restricts future decisions in exchange for visual coherence. Every section should feel like it narrows the solution space productively. + +--- + +## Recommended Next Steps + +After the art bible is approved: +- Run `/map-systems` to decompose the concept into game systems before authoring GDDs +- Run `/setup-engine` if the engine is not yet configured (asset standards may need revisiting after engine selection) +- Run `/design-system [first-system]` to start authoring per-system GDDs +- Run `/consistency-check` once GDDs exist to validate them against the art bible's visual rules +- Run `/create-architecture` to produce the master architecture document diff --git a/.omc/skills/asset-audit/SKILL.md b/.omc/skills/asset-audit/SKILL.md new file mode 100644 index 0000000..3edfb4c --- /dev/null +++ b/.omc/skills/asset-audit/SKILL.md @@ -0,0 +1,94 @@ +--- +name: asset-audit +description: "Audits game assets for compliance with naming conventions, file size budgets, format standards, and pipeline requirements. Identifies orphaned assets, missing references, and standard violations." +argument-hint: "[category|all]" +user-invocable: true +allowed-tools: Read, Glob, Grep +# Read-only diagnostic skill — no specialist agent delegation needed +--- + +## Phase 1: Read Standards + +Read the art bible or asset standards from the relevant design docs and the CLAUDE.md naming conventions. 
+ +--- + +## Phase 2: Scan Asset Directories + +Scan the target asset directory using Glob: + +- `assets/art/**/*` for art assets +- `assets/audio/**/*` for audio assets +- `assets/vfx/**/*` for VFX assets +- `assets/shaders/**/*` for shaders +- `assets/data/**/*` for data files + +--- + +## Phase 3: Run Compliance Checks + +**Naming conventions:** +- Art: `[category]_[name]_[variant]_[size].[ext]` +- Audio: `[category]_[context]_[name]_[variant].[ext]` +- All files must be lowercase with underscores + +**File standards:** +- Textures: Power-of-two dimensions, correct format (PNG for UI, compressed for 3D), within size budget +- Audio: Correct sample rate, format (OGG for SFX, OGG/MP3 for music), within duration limits +- Data: Valid JSON/YAML, schema-compliant + +**Orphaned assets:** Search code for references to each asset file. Flag any with no references. + +**Missing assets:** Search code for asset references and verify the files exist. + +--- + +## Phase 4: Output Audit Report + +```markdown +# Asset Audit Report -- [Category] -- [Date] + +## Summary +- **Total assets scanned**: [N] +- **Naming violations**: [N] +- **Size violations**: [N] +- **Format violations**: [N] +- **Orphaned assets**: [N] +- **Missing assets**: [N] +- **Overall health**: [CLEAN / MINOR ISSUES / NEEDS ATTENTION] + +## Naming Violations +| File | Expected Pattern | Issue | +|------|-----------------|-------| + +## Size Violations +| File | Budget | Actual | Overage | +|------|--------|--------|---------| + +## Format Violations +| File | Expected Format | Actual Format | +|------|----------------|---------------| + +## Orphaned Assets (no code references found) +| File | Last Modified | Size | Recommendation | +|------|-------------|------|---------------| + +## Missing Assets (referenced but not found) +| Reference Location | Expected Path | +|-------------------|---------------| + +## Recommendations +[Prioritized list of fixes] + +## Verdict: [COMPLIANT / WARNINGS / NON-COMPLIANT] 
+``` + +This skill is read-only — it produces a report but does not write files. + +--- + +## Phase 5: Next Steps + +- Fix naming violations using the patterns defined in CLAUDE.md. +- Delete confirmed orphaned assets after manual review. +- Run `/content-audit` to cross-check asset counts against GDD-specified requirements. diff --git a/.omc/skills/asset-spec/SKILL.md b/.omc/skills/asset-spec/SKILL.md new file mode 100644 index 0000000..337fc4b --- /dev/null +++ b/.omc/skills/asset-spec/SKILL.md @@ -0,0 +1,264 @@ +--- +name: asset-spec +description: "Generate per-asset visual specifications and AI generation prompts from GDDs, level docs, or character profiles. Produces structured spec files and updates the master asset manifest. Run after art bible and GDD/level design are approved, before production begins." +argument-hint: "[system: | level: | character:] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion +--- + +If no argument is provided, check whether `design/assets/asset-manifest.md` exists: +- If it exists: read it, find the first context (system/level/character) with any asset at status "Needed" but no spec file written yet, and use `AskUserQuestion`: + - Prompt: "The next unspecced context is **[target]**. Generate asset specs for it?" + - Options: `[A] Yes — spec [target]` / `[B] Pick a different target` / `[C] Stop here` +- If no manifest: fail with: + > "Usage: `/asset-spec system:` — e.g., `/asset-spec system:tower-defense` + > Or: `/asset-spec level:iron-gate-fortress` / `/asset-spec character:frost-warden` + > Run after your art bible and GDDs are approved." 
+ +--- + +## Phase 0: Parse Arguments + +Extract: +- **Target type**: `system`, `level`, or `character` +- **Target name**: the name after the colon (normalize to kebab-case) +- **Review mode**: `--review [full|lean|solo]` if present + +**Mode behavior:** +- `full` (default): spawn both `art-director` and `technical-artist` in parallel +- `lean`: spawn `art-director` only — faster, skips technical constraint pass +- `solo`: no agent spawning — main session writes specs from art bible rules alone. Use for simple asset categories or when speed matters more than depth. + +--- + +## Phase 1: Gather Context + +Read all source material **before** asking the user anything. + +### Required reads: +- **Art bible**: Read `design/art/art-bible.md` — fail if missing: + > "No art bible found. Run `/art-bible` first — asset specs are anchored to the art bible's visual rules and asset standards." + Extract: Visual Identity Statement, Color System (semantic colors), Shape Language, Asset Standards (Section 8 — dimensions, formats, polycount budgets, texture resolution tiers). + +- **Technical preferences**: Read `.claude/docs/technical-preferences.md` — extract performance budgets and naming conventions. + +### Source doc reads (by target type): +- **system**: Read `design/gdd/[target-name].md`. Extract the **Visual/Audio Requirements** section. If it doesn't exist or reads `[To be designed]`: + > "The Visual/Audio section of `design/gdd/[target-name].md` is empty. Either run `/design-system [target-name]` to complete the GDD, or describe the visual needs manually." + Use `AskUserQuestion`: `[A] Describe needs manually` / `[B] Stop — complete the GDD first` +- **level**: Read `design/levels/[target-name].md`. Extract art requirements, asset list, VFX needs, and the art-director's production concept specs from Step 4. +- **character**: Read `design/narrative/characters/[target-name].md` or search `design/narrative/` for the character profile. 
Extract visual description, role, and any specified distinguishing features. + +### Optional reads: +- **Existing manifest**: Read `design/assets/asset-manifest.md` if it exists — extract already-specced assets for this target to avoid duplicates. +- **Related specs**: Glob `design/assets/specs/*.md` — scan for assets that could be shared (e.g., a common UI element specced for one system might apply here too). + +### Present context summary: +> **Asset Spec: [Target Type] — [Target Name]** +> - Source doc: [path] — [N] asset types identified +> - Art bible: found — Asset Standards at Section 8 +> - Existing specs for this target: [N already specced / none] +> - Shared assets found in other specs: [list or "none"] + +--- + +## Phase 2: Asset Identification + +From the source doc, extract every asset type mentioned — explicit and implied. + +**For systems**: look for VFX events, sprite references, UI elements, audio triggers, particle effects, icon needs, and any "visual feedback" language. + +**For levels**: look for unique environment props, atmospheric VFX, lighting setups, ambient audio, skybox/background, and any area-specific materials. + +**For characters**: look for sprite sheets (idle, walk, attack, death), portrait/avatar, VFX attached to abilities, UI representation (icon, health bar skin). + +Group assets into categories: +- **Sprite / 2D Art** — character sprites, UI icons, tile sheets +- **VFX / Particles** — hit effects, ambient particles, screen effects +- **Environment** — props, tiles, backgrounds, skyboxes +- **UI** — HUD elements, menu art, fonts (if custom) +- **Audio** — SFX, music tracks, ambient loops *(note: audio specs are descriptions only — no generation prompts)* +- **3D Assets** — meshes, materials (if applicable per engine) + +Present the full identified list to the user. Use `AskUserQuestion`: +- Prompt: "I identified [N] assets across [N] categories for **[target]**. 
Review before speccing:" +- Show the grouped list in conversation text first +- Options: `[A] Proceed — spec all of these` / `[B] Remove some assets` / `[C] Add assets I didn't catch` / `[D] Adjust categories` + +Do NOT proceed to Phase 3 without user confirmation of the asset list. + +--- + +## Phase 3: Spec Generation + +Spawn specialist agents based on review mode. **Issue all Task calls simultaneously — do not wait for one before starting the next.** + +### Full mode — spawn in parallel: + +**`art-director`** via Task: +- Provide: full asset list from Phase 2, art bible Visual Identity Statement, Color System, Shape Language, the source doc's visual requirements, and any reference games/art mentioned in the art bible Section 9 +- Ask: "For each asset in this list, produce: (1) a 2–3 sentence visual description anchored to the art bible's shape language and color system — be specific enough that two different artists would produce consistent results; (2) a generation prompt ready for use with AI image tools (Midjourney/Stable Diffusion style — include style keywords, composition, color palette anchors, negative prompts); (3) which art bible rules directly govern this asset (cite by section). For audio assets, describe the sonic character instead of a generation prompt." + +**`technical-artist`** via Task: +- Provide: full asset list, art bible Asset Standards (Section 8), technical-preferences.md performance budgets, engine name and version +- Ask: "For each asset in this list, specify: (1) exact dimensions or polycount (match the art bible Asset Standards tiers — do not invent new sizes); (2) file format and export settings; (3) naming convention (from technical-preferences.md); (4) any engine-specific constraints this asset type must respect; (5) LOD requirements if applicable. Flag any asset type where the art bible's preferred standard conflicts with the engine's constraints." + +### Lean mode — spawn art-director only (skip technical-artist). 
+ +### Solo mode — skip both. Derive specs from art bible rules alone, noting that technical constraints were not validated. + +**Collect both responses before Phase 4.** If any conflict exists between art-director and technical-artist (e.g., art-director specifies 4K textures but technical-artist flags the engine budget requires 512px), surface it explicitly — do NOT silently resolve. + +--- + +## Phase 4: Compile and Review + +Combine the agent outputs into a draft spec per asset. Present all specs in conversation text using this format: + +``` +## ASSET-[NNN] — [Asset Name] + +| Field | Value | +|-------|-------| +| Category | [Sprite / VFX / Environment / UI / Audio / 3D] | +| Dimensions | [e.g. 256×256px, 4-frame sprite sheet] | +| Format | [PNG / SVG / WAV / etc.] | +| Naming | [e.g. vfx_frost_hit_01.png] | +| Polycount | [if 3D — e.g. <800 tris] | +| Texture Res | [e.g. 512px — matches Art Bible §8 Tier 2] | + +**Visual Description:** +[2–3 sentences. Specific enough for two artists to produce consistent results.] + +**Art Bible Anchors:** +- §3 Shape Language: [relevant rule applied] +- §4 Color System: [color role — e.g. "uses Threat Blue per semantic color rules"] + +**Generation Prompt:** +[Ready-to-use prompt. Include: style keywords, composition notes, color palette anchors, lighting direction, negative prompts.] + +**Status:** Needed +``` + +After presenting all specs, use `AskUserQuestion`: +- Prompt: "Asset specs for **[target]** — [N] assets. Review complete?" +- Options: `[A] Approve all — write to file` / `[B] Revise a specific asset` / `[C] Regenerate with different direction` + +If [B]: ask which asset and what to change. Revise inline and re-present. Do NOT re-spawn agents for minor text revisions — only re-spawn if the visual direction itself needs to change. + +If [C]: ask what direction to change. Re-spawn the relevant agent with the updated brief. 
+ +--- + +## Phase 5: Write Spec File + +After approval, ask: "May I write the spec to `design/assets/specs/[target-name]-assets.md`?" + +Write the file with: + +```markdown +# Asset Specs — [Target Type]: [Target Name] + +> **Source**: [path to source GDD/level/character doc] +> **Art Bible**: design/art/art-bible.md +> **Generated**: [date] +> **Status**: [N] assets specced / [N] approved / [N] in production / [N] done + +[all asset specs in ASSET-NNN format] +``` + +Then update `design/assets/asset-manifest.md`. If it doesn't exist, create it: + +```markdown +# Asset Manifest + +> Last updated: [date] + +## Progress Summary + +| Total | Needed | In Progress | Done | Approved | +|-------|--------|-------------|------|----------| +| [N] | [N] | [N] | [N] | [N] | + +## Assets by Context + +### [Target Type]: [Target Name] +| Asset ID | Name | Category | Status | Spec File | +|----------|------|----------|--------|-----------| +| ASSET-001 | [name] | [category] | Needed | design/assets/specs/[target]-assets.md | +``` + +If the manifest already exists, append the new context block and update the Progress Summary counts. + +Ask: "May I update `design/assets/asset-manifest.md`?" + +--- + +## Phase 6: Close + +Use `AskUserQuestion`: +- Prompt: "Asset specs complete for **[target]**. What's next?" +- Options: + - `[A] Spec another system — /asset-spec system:[next-system]` + - `[B] Spec a level — /asset-spec level:[level-name]` + - `[C] Spec a character — /asset-spec character:[character-name]` + - `[D] Run /asset-audit — validate delivered assets against specs` + - `[E] Stop here` + +--- + +## Asset ID Assignment + +Asset IDs are assigned sequentially across the entire project — not per-context. Read the manifest before assigning IDs to find the current highest number: + +``` +Grep pattern="ASSET-" path="design/assets/asset-manifest.md" +``` + +Start new assets from `ASSET-[highest + 1]`. This ensures IDs are stable and unique across the whole project. 
+ +If no manifest exists yet, start from `ASSET-001`. + +--- + +## Shared Asset Protocol + +Before speccing an asset, check if an equivalent already exists in another context's spec: + +- Common UI elements (health bars, score displays) are often shared across systems +- Generic environment props may appear in multiple levels +- Character VFX (hit sparks, death effects) may reuse a base spec with color variants + +If a match is found: reference the existing ASSET-ID rather than creating a duplicate. Note the shared usage in a referenced-by column in the manifest — add the column to that context's asset table if it does not already exist. + +> "ASSET-012 (Generic Hit Spark) already specced for Combat system. Reusing for Tower Defense — adding tower-defense to referenced-by." + +--- + +## Error Recovery Protocol + +If any spawned agent returns BLOCKED or cannot complete: + +1. Surface immediately: "[AgentName]: BLOCKED — [reason]" +2. In `lean` mode or if `technical-artist` blocks: proceed with art-director output only — note that technical constraints were not validated +3. In `solo` mode or if `art-director` blocks: derive descriptions from art bible rules — flag as "Art director not consulted — verify against art bible before production" +4. 
Always produce a partial spec — never discard work because one agent blocked + +--- + +## Collaborative Protocol + +Every phase follows: **Identify → Confirm → Generate → Review → Approve → Write** + +- Never spec assets without first confirming the asset list with the user +- Always anchor specs to the art bible — a spec that contradicts the art bible is wrong +- Surface all agent disagreements — do not silently pick one +- Write the spec file only after explicit approval +- Update the manifest immediately after writing the spec + +--- + +## Recommended Next Steps + +- Run `/asset-spec [next-context]` to continue speccing remaining systems, levels, or characters +- Run `/asset-audit` to validate delivered assets against the written specs and identify gaps or mismatches diff --git a/.omc/skills/balance-check/SKILL.md b/.omc/skills/balance-check/SKILL.md new file mode 100644 index 0000000..65ff326 --- /dev/null +++ b/.omc/skills/balance-check/SKILL.md @@ -0,0 +1,118 @@ +--- +name: balance-check +description: "Analyzes game balance data files, formulas, and configuration to identify outliers, broken progressions, degenerate strategies, and economy imbalances. Use after modifying any balance-related data or design. Use when user says 'balance report', 'check game balance', 'run a balance check'." +argument-hint: "[system-name|path-to-data-file]" +user-invocable: true +allowed-tools: Read, Glob, Grep +agent: economy-designer +--- + +## Phase 1: Identify Balance Domain + +Determine the balance domain from `$ARGUMENTS[0]`: + +- **Combat** → weapon/ability DPS, time-to-kill, damage type interactions +- **Economy** → resource faucets/sinks, acquisition rates, item pricing +- **Progression** → XP/power curves, dead zones, power spikes +- **Loot** → rarity distribution, pity timers, inventory pressure +- **File path given** → load that file directly and infer domain from content + +If no argument, ask the user which system to check. 
+ +--- + +## Phase 2: Read Data Files + +Read relevant files from `assets/data/` and `design/balance/` for the identified domain. +Note every file read — they will appear in the Data Sources section of the report. + +--- + +## Phase 3: Read Design Document + +Read the GDD for the system from `design/gdd/` to understand intended design targets, +tuning knobs, and expected value ranges. This is the baseline for "correct" behaviour. + +--- + +## Phase 4: Perform Analysis + +Run domain-specific checks: + +**Combat balance:** +- Calculate DPS for all weapons/abilities at each power tier +- Check time-to-kill at each tier +- Identify any options that dominate all others (strictly better) +- Check if defensive options can create unkillable states +- Verify damage type/resistance interactions are balanced + +**Economy balance:** +- Map all resource faucets and sinks with flow rates +- Project resource accumulation over time +- Check for infinite resource loops +- Verify gold sinks scale with gold generation +- Check if any items are never worth purchasing + +**Progression balance:** +- Plot the XP curve and power curve +- Check for dead zones (no meaningful progression for too long) +- Check for power spikes (sudden jumps in capability) +- Verify content gates align with expected player power +- Check if skip/grind strategies break intended pacing + +**Loot balance:** +- Calculate expected time to acquire each rarity tier +- Check pity timer math +- Verify no loot is strictly useless at any stage +- Check inventory pressure vs acquisition rate + +--- + +## Phase 5: Output the Analysis + +``` +## Balance Check: [System Name] + +### Data Sources Analyzed +- [List of files read] + +### Health Summary: [HEALTHY / CONCERNS / CRITICAL ISSUES] + +### Outliers Detected +| Item/Value | Expected Range | Actual | Issue | +|-----------|---------------|--------|-------| + +### Degenerate Strategies Found +- [Strategy description and why it is problematic] + +### Progression Analysis 
+[Graph description or table showing progression curve health] + +### Recommendations +| Priority | Issue | Suggested Fix | Impact | +|----------|-------|--------------|--------| + +### Values That Need Attention +[Specific values with suggested adjustments and rationale] +``` + +--- + +## Phase 6: Fix & Verify Cycle + +After presenting the report, ask: + +> "Would you like to fix any of these balance issues now?" + +If yes: +- Ask which issue to address first (refer to the Recommendations table by priority row) +- Guide the user to update the relevant data file in `assets/data/` or formula in `design/balance/` +- After each fix, offer to re-run the relevant balance checks to verify no new outliers were introduced +- If the fix changes a tuning knob defined in a GDD or referenced by an ADR, remind the user: + > "This value is defined in a design document. Run `/propagate-design-change [path]` on the affected GDD to find downstream impacts before committing." + +If no: +- Summarize open issues and suggest saving the report to `design/balance/balance-check-[system]-[date].md` for later + +End with: +> "Re-run `/balance-check` after fixes to verify." diff --git a/.omc/skills/brainstorm/SKILL.md b/.omc/skills/brainstorm/SKILL.md new file mode 100644 index 0000000..48d0e6b --- /dev/null +++ b/.omc/skills/brainstorm/SKILL.md @@ -0,0 +1,350 @@ +--- +name: brainstorm +description: "Guided game concept ideation — from zero idea to a structured game concept document. Uses professional studio ideation techniques, player psychology frameworks, and structured creative exploration." +argument-hint: "[genre or theme hint, or 'open'] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, WebSearch, Task, AskUserQuestion +--- + +When this skill is invoked: + +1. **Parse the argument** for an optional genre/theme hint (e.g., `roguelike`, + `space survival`, `cozy farming`). If `open` or no argument, start from + scratch. 
Also resolve the review mode (once, store for all gate spawns this run): + 1. If `--review [full|lean|solo]` was passed → use that + 2. Else read `production/review-mode.txt` → use that value + 3. Else → default to `lean` + + See `.claude/docs/director-gates.md` for the full check pattern. + +2. **Check for existing concept work**: + - Read `design/gdd/game-concept.md` if it exists (resume, don't restart) + - Read `design/gdd/game-pillars.md` if it exists (build on established pillars) + +3. **Run through ideation phases** interactively, asking the user questions at + each phase. Do NOT generate everything silently — the goal is **collaborative + exploration** where the AI acts as a creative facilitator, not a replacement + for the human's vision. + + **Use `AskUserQuestion`** at key decision points throughout brainstorming: + - Constrained taste questions (genre preferences, scope, team size) + - Concept selection ("Which 2-3 concepts resonate?") after presenting options + - Direction choices ("Develop further, explore more, or prototype?") + - Pillar ranking after concepts are refined + Write full creative analysis in conversation text first, then use + `AskUserQuestion` to capture the decision with concise labels. + + Professional studio brainstorming principles to follow: + - Withhold judgment — no idea is bad during exploration + - Encourage unusual ideas — outside-the-box thinking sparks better concepts + - Build on each other — "yes, and..." responses, not "but..." + - Use constraints as creative fuel — limitations often produce the best ideas + - Time-box each phase — keep momentum, don't over-deliberate early + +--- + +### Phase 1: Creative Discovery + +Start by understanding the person, not the game. Ask these questions +conversationally (not as a checklist): + +**Emotional anchors**: +- What's a moment in a game that genuinely moved you, thrilled you, or made + you lose track of time? What specifically created that feeling? 
+- Is there a fantasy or power trip you've always wanted in a game but never + quite found? + +**Taste profile**: +- What 3 games have you spent the most time with? What kept you coming back? + *(Ask this as plain text — the user must be able to type specific game names freely. + Do NOT put this in an AskUserQuestion with preset options.)* +- Are there genres you love? Genres you avoid? Why? +- Do you prefer games that challenge you, relax you, tell you stories, + or let you express yourself? *(Use `AskUserQuestion` for this — constrained choice.)* + +**Practical constraints** (shape the sandbox before brainstorming). +Bundle these into a single multi-tab `AskUserQuestion` with these exact tab labels: +- Tab "Experience" — "What kind of experience do you most want players to have?" (Challenge & Mastery / Story & Discovery / Expression & Creativity / Relaxation & Flow) +- Tab "Timeline" — "What's your realistic development timeline?" (Weeks / Months / 1-2 years / Multi-year) +- Tab "Dev level" — "Where are you in your dev journey?" (First game / Shipped before / Professional background) + +Use exactly these tab names — do not rename or duplicate them. + +**Synthesize** the answers into a **Creative Brief** — a 3-5 sentence +summary of the person's emotional goals, taste profile, and constraints. +Read the brief back and confirm it captures their intent. + +--- + +### Phase 2: Concept Generation + +Using the creative brief as a foundation, generate **3 distinct concepts** +that each take a different creative direction. Use these ideation techniques: + +**Technique 1: Verb-First Design** +Start with the core player verb (build, fight, explore, solve, survive, +create, manage, discover) and build outward from there. The verb IS the game. + +**Technique 2: Mashup Method** +Combine two unexpected elements: [Genre A] + [Theme B]. The tension between +the two creates the unique hook. 
(e.g., "farming sim + cosmic horror", +"roguelike + dating sim", "city builder + real-time combat") + +**Technique 3: Experience-First Design (MDA Backward)** +Start from the desired player emotion (aesthetic goal from MDA framework: +sensation, fantasy, narrative, challenge, fellowship, discovery, expression, +submission) and work backward to the dynamics and mechanics that produce it. + +For each concept, present: +- **Working Title** +- **Elevator Pitch** (1-2 sentences — must pass the "10-second test") +- **Core Verb** (the single most common player action) +- **Core Fantasy** (the emotional promise) +- **Unique Hook** (passes the "and also" test: "Like X, AND ALSO Y") +- **Primary MDA Aesthetic** (which emotion dominates?) +- **Estimated Scope** (small / medium / large) +- **Why It Could Work** (1 sentence on market/audience fit) +- **Biggest Risk** (1 sentence on the hardest unanswered question) + +Present all three. Then use `AskUserQuestion` to capture the selection. + +**CRITICAL**: This MUST be a plain list call — no tabs, no form fields. Use exactly this structure: + +``` +AskUserQuestion( + prompt: "Which concept resonates with you? You can pick one, combine elements, or ask for fresh directions.", + options: [ + "Concept 1 — [Title]", + "Concept 2 — [Title]", + "Concept 3 — [Title]", + "Combine elements across concepts", + "Generate fresh directions" + ] +) +``` + +Do NOT use a `tabs` field here. The `tabs` form is for multi-field input only — using it here causes an "Invalid tool parameters" error. This is a plain `prompt` + `options` call. + +Never pressure toward a choice — let them sit with it. + +--- + +### Phase 3: Core Loop Design + +For the chosen concept, use structured questioning to build the core loop. +The core loop is the beating heart of the game — if it isn't fun in +isolation, no amount of content or polish will save the game. 
+ +**30-Second Loop** (moment-to-moment): + +Ask these as `AskUserQuestion` calls — derive the options from the chosen concept, don't hardcode them: + +1. **Core action feel** — prompt: "What's the primary feel of the core action?" Generate 3-4 options that fit the concept's genre and tone, plus a free-text escape (`I'll describe it`). + +2. **Key design dimension** — identify the most important design variable for this specific concept (e.g., world reactivity, pacing, player agency) and ask about it. Generate options that match the concept. Always include a free-text escape. + +After capturing answers, analyze: Is this action intrinsically satisfying? What makes it feel good? (Audio feedback, visual juice, timing satisfaction, tactical depth?) + +**5-Minute Loop** (short-term goals): +- What structures the moment-to-moment play into cycles? +- Where does "one more turn" / "one more run" psychology kick in? +- What choices does the player make at this level? + +**Session Loop** (30-120 minutes): +- What does a complete session look like? +- Where are the natural stopping points? +- What's the "hook" that makes them think about the game when not playing? + +**Progression Loop** (days/weeks): +- How does the player grow? (Power? Knowledge? Options? Story?) +- What's the long-term goal? When is the game "done"? + +**Player Motivation Analysis** (based on Self-Determination Theory): +- **Autonomy**: How much meaningful choice does the player have? +- **Competence**: How does the player feel their skill growing? +- **Relatedness**: How does the player feel connected (to characters, + other players, or the world)? + +--- + +### Phase 4: Pillars and Boundaries + +Game pillars are used by real AAA studios (God of War, Hades, The Last of +Us) to keep hundreds of team members making decisions that all point the +same direction. Even for solo developers, pillars prevent scope creep and +keep the vision sharp. 
+ +Collaboratively define **3-5 pillars**: +- Each pillar has a **name** and **one-sentence definition** +- Each pillar has a **design test**: "If we're debating between X and Y, + this pillar says we choose __" +- Pillars should feel like they create tension with each other — if all + pillars point the same way, they're not doing enough work + +Then define **3+ anti-pillars** (what this game is NOT): +- Anti-pillars prevent the most common form of scope creep: "wouldn't it + be cool if..." features that don't serve the core vision +- Frame as: "We will NOT do [thing] because it would compromise [pillar]" + +**Pillar confirmation**: After presenting the full pillar set, use `AskUserQuestion`: +- Prompt: "Do these pillars feel right for your game?" +- Options: `[A] Lock these in` / `[B] Rename or reframe one` / `[C] Swap a pillar out` / `[D] Something else` + +If the user selects B, C, or D, make the revision, then use `AskUserQuestion` again: +- Prompt: "Pillars updated. Ready to lock these in?" +- Options: `[A] Lock these in` / `[B] Revise another pillar` / `[C] Something else` + +Repeat until the user selects [A] Lock these in. + +**Review mode check** — apply before spawning CD-PILLARS and AD-CONCEPT-VISUAL: +- `solo` → skip both. Note: "CD-PILLARS skipped — Solo mode. AD-CONCEPT-VISUAL skipped — Solo mode." Proceed to Phase 5. +- `lean` → skip both (not PHASE-GATEs). Note: "CD-PILLARS skipped — Lean mode. AD-CONCEPT-VISUAL skipped — Lean mode." Proceed to Phase 5. +- `full` → spawn as normal. + +**After pillars and anti-pillars are agreed, spawn BOTH `creative-director` AND `art-director` via Task in parallel before moving to Phase 5. Issue both Task calls simultaneously — do not wait for one before starting the other.** + +- **`creative-director`** — gate **CD-PILLARS** (`.claude/docs/director-gates.md`) + Pass: full pillar set with design tests, anti-pillars, core fantasy, unique hook. 
+ +- **`art-director`** — gate **AD-CONCEPT-VISUAL** (`.claude/docs/director-gates.md`) + Pass: game concept elevator pitch, full pillar set with design tests, target platform (if known), any reference games or visual touchstones the user mentioned. + +Collect both verdicts, then present them together using a two-tab `AskUserQuestion`: +- Tab **"Pillars"**: present creative-director feedback. Options mirror the standard CD-PILLARS handling — `Lock in as-is` / `Revise [specific pillar]` / `Discuss further`. +- Tab **"Visual anchor"**: present the art-director's 2-3 named visual direction options. Options: each named direction (one per option) + `Combine elements across directions` + `Describe my own direction`. + +The user's selected visual anchor (the named direction or their custom description) is stored as the **Visual Identity Anchor** — it will be written into the game-concept document and becomes the foundation of the art bible. + +If the creative-director returns CONCERNS or REJECT on pillars, resolve pillar issues before asking for the visual anchor selection — visual direction should flow from confirmed pillars. + +--- + +### Phase 5: Player Type Validation + +Using the Bartle taxonomy and Quantic Foundry motivation model, validate +who this game is actually for: + +- **Primary player type**: Who will LOVE this game? (Achievers, Explorers, + Socializers, Competitors, Creators, Storytellers) +- **Secondary appeal**: Who else might enjoy it? +- **Who is this NOT for**: Being clear about who won't like this game is as + important as knowing who will +- **Market validation**: Are there successful games that serve a similar + player type? What can we learn from their audience size? + +--- + +### Phase 6: Scope and Feasibility + +Ground the concept in reality: + +- **Target platform**: Use `AskUserQuestion` — "What platforms are you targeting for this game?" 
+ Options: `PC (Steam / Epic)` / `Mobile (iOS / Android)` / `Console` / `Web / Browser` / `Multiple platforms` + Record the answer — it directly shapes the engine recommendation and will be passed to `/setup-engine`. + Note platform implications if relevant (e.g., mobile means Unity is strongly preferred; console means Godot has limitations; web means Godot exports cleanly). + +- **Engine experience**: Use `AskUserQuestion` — "Do you already have an engine you work in?" + Options: `Godot` / `Unity` / `Unreal Engine 5` / `No preference — help me decide` + - If they pick an engine → record it as their preference and move on. Do NOT second-guess it. + - If "No preference" → tell them: "Run `/setup-engine` after this session — it will walk you through the full decision based on your concept and platform target." Do not make a recommendation here. +- **Art pipeline**: What's the art style and how labor-intensive is it? +- **Content scope**: Estimate level/area count, item count, gameplay hours +- **MVP definition**: What's the absolute minimum build that tests "is the + core loop fun?" +- **Biggest risks**: Technical risks, design risks, market risks +- **Scope tiers**: What's the full vision vs. what ships if time runs out? + +**Review mode check** — apply before spawning TD-FEASIBILITY: +- `solo` → skip. Note: "TD-FEASIBILITY skipped — Solo mode." Proceed directly to scope tier definition. +- `lean` → skip (not a PHASE-GATE). Note: "TD-FEASIBILITY skipped — Lean mode." Proceed directly to scope tier definition. +- `full` → spawn as normal. + +**After identifying biggest technical risks, spawn `technical-director` via Task using gate TD-FEASIBILITY (`.claude/docs/director-gates.md`) before scope tiers are defined.** + +Pass: core loop description, platform target, engine choice (or "undecided"), list of identified technical risks. + +Present the assessment to the user. If HIGH RISK, offer to revisit scope before finalising. If CONCERNS, note them and continue. 
+ +**Review mode check** — apply before spawning PR-SCOPE: +- `solo` → skip. Note: "PR-SCOPE skipped — Solo mode." Proceed to document generation. +- `lean` → skip (not a PHASE-GATE). Note: "PR-SCOPE skipped — Lean mode." Proceed to document generation. +- `full` → spawn as normal. + +**After scope tiers are defined, spawn `producer` via Task using gate PR-SCOPE (`.claude/docs/director-gates.md`).** + +Pass: full vision scope, MVP definition, timeline estimate, team size. + +Present the assessment to the user. If UNREALISTIC, offer to adjust the MVP definition or scope tiers before writing the document. + +--- + +4. **Generate the game concept document** using the template at + `.claude/docs/templates/game-concept.md`. Fill in ALL sections from the + brainstorm conversation, including the MDA analysis, player motivation + profile, and flow state design sections. + + **Include a Visual Identity Anchor section** in the game concept document with: + - The selected visual direction name + - The one-line visual rule + - The 2-3 supporting visual principles with their design tests + - The color philosophy summary + + This section is the seed of the art bible — it captures the "everything must + move" decision before it can be forgotten between sessions. + +5. Use `AskUserQuestion` for write approval: +- Prompt: "Game concept is ready. May I write it to `design/gdd/game-concept.md`?" +- Options: `[A] Yes — write it` / `[B] Not yet — revise a section first` + +If [B]: ask which section to revise using `AskUserQuestion` with options: `Elevator Pitch` / `Core Fantasy & Unique Hook` / `Pillars` / `Core Loop` / `MVP Definition` / `Scope Tiers` / `Risks` / `Something else — I'll describe` + +After revising, show the updated section as a diff or clear before/after, then use `AskUserQuestion` — "Ready to write the updated concept document?" +Options: `[A] Yes — write it` / `[B] Revise another section` +Repeat until the user selects [A]. 
 + +If yes, generate the document using the template at `.claude/docs/templates/game-concept.md`, fill in ALL sections from the brainstorm conversation, and write the file, creating directories as needed. + +**Scope consistency rule**: The "Estimated Scope" field in the Core Identity table must match the full-vision timeline from the Scope Tiers section — not just say "Large (9+ months)". Write it as "Large (X–Y months, solo)" or "Large (X–Y months, team of N)" so the summary table is accurate. + +6. **Suggest next steps** (in this order — this is the professional studio + pre-production pipeline). List ALL steps — do not abbreviate or truncate: + 1. "Run `/setup-engine` to configure the engine and populate version-aware reference docs" + 2. "Run `/art-bible` to create the visual identity specification — do this BEFORE writing GDDs. The art bible gates asset production and shapes technical architecture decisions (rendering, VFX, UI systems)." + 3. "Use `/design-review design/gdd/game-concept.md` to validate concept completeness before going downstream" + 4. "Discuss vision with the `creative-director` agent for pillar refinement" + 5. "Decompose the concept into individual systems with `/map-systems` — maps dependencies, assigns priorities, and creates the systems index" + 6. "Author per-system GDDs with `/design-system` — guided, section-by-section GDD writing for each system identified in step 5" + 7. "Plan the technical architecture with `/create-architecture` — produces the master architecture blueprint and Required ADR list" + 8. "Record key architectural decisions with `/architecture-decision (×N)` — write one ADR per decision in the Required ADR list from `/create-architecture`" + 9. "Validate readiness to advance with `/gate-check` — phase gate before committing to production" + 10. "Prototype the riskiest system with `/prototype [core-mechanic]` — validate the core loop before full implementation" + 11. 
"Run `/playtest-report` after the prototype to validate the core hypothesis" + 12. "If validated, plan the first sprint with `/sprint-plan new`" + +7. **Output a summary** with the chosen concept's elevator pitch, pillars, + primary player type, engine recommendation, biggest risk, and file path. + +Verdict: **COMPLETE** — game concept created and handed off for next steps. + +--- + +## Context Window Awareness + +This is a multi-phase skill. If context reaches or exceeds 70% during any phase, +append this notice to the current response before continuing: + +> **Context is approaching the limit (≥70%).** The game concept document is saved +> to `design/gdd/game-concept.md`. Open a fresh Claude Code session to continue +> if needed — progress is not lost. + +--- + +## Recommended Next Steps + +After the game concept is written, follow the pre-production pipeline in order: +1. `/setup-engine` — configure the engine and populate version-aware reference docs +2. `/art-bible` — establish visual identity before writing any GDDs +3. `/map-systems` — decompose the concept into individual systems with dependencies +4. `/design-system [first-system]` — author per-system GDDs in dependency order +5. `/create-architecture` — produce the master architecture blueprint +6. `/gate-check pre-production` — validate readiness before committing to production diff --git a/.omc/skills/bug-report/SKILL.md b/.omc/skills/bug-report/SKILL.md new file mode 100644 index 0000000..45d8249 --- /dev/null +++ b/.omc/skills/bug-report/SKILL.md @@ -0,0 +1,163 @@ +--- +name: bug-report +description: "Creates a structured bug report from a description, or analyzes code to identify potential bugs. Ensures every bug report has full reproduction steps, severity assessment, and context." 
+argument-hint: "[description] | analyze [path-to-file] | verify [BUG-ID] | close [BUG-ID]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash +--- + +## Phase 1: Parse Arguments + +Determine the mode from the argument: + +- No keyword → **Description Mode**: generate a structured bug report from the provided description +- `analyze [path]` → **Analyze Mode**: read the target file(s) and identify potential bugs +- `verify [BUG-ID]` → **Verify Mode**: confirm a reported fix actually resolved the bug +- `close [BUG-ID]` → **Close Mode**: mark a verified bug as closed with resolution record + +If no argument is provided, ask the user for a bug description before proceeding. + +--- + +## Phase 2A: Description Mode + +1. **Parse the description** for key information: what broke, when, how to reproduce it, and what the expected behavior is. + +2. **Search the codebase** for related files using Grep/Glob to add context (affected system, likely files). + +3. **Draft the bug report**: + +```markdown +# Bug Report + +## Summary +**Title**: [Concise, descriptive title] +**ID**: BUG-[NNNN] +**Severity**: [S1-Critical / S2-Major / S3-Minor / S4-Trivial] +**Priority**: [P1-Immediate / P2-Next Sprint / P3-Backlog / P4-Wishlist] +**Status**: Open +**Reported**: [Date] +**Reporter**: [Name] + +## Classification +- **Category**: [Gameplay / UI / Audio / Visual / Performance / Crash / Network] +- **System**: [Which game system is affected] +- **Frequency**: [Always / Often (>50%) / Sometimes (10-50%) / Rare (<10%)] +- **Regression**: [Yes/No/Unknown -- was this working before?] + +## Environment +- **Build**: [Version or commit hash] +- **Platform**: [OS, hardware if relevant] +- **Scene/Level**: [Where in the game] +- **Game State**: [Relevant state -- inventory, quest progress, etc.] + +## Reproduction Steps +**Preconditions**: [Required state before starting] + +1. [Exact step 1] +2. [Exact step 2] +3. 
[Exact step 3] + +**Expected Result**: [What should happen] +**Actual Result**: [What actually happens] + +## Technical Context +- **Likely affected files**: [List of files based on codebase search] +- **Related systems**: [What other systems might be involved] +- **Possible root cause**: [If identifiable from the description] + +## Evidence +- **Logs**: [Relevant log output if available] +- **Visual**: [Description of visual evidence] + +## Related Issues +- [Links to related bugs or design documents] + +## Notes +[Any additional context or observations] +``` + +--- + +## Phase 2B: Analyze Mode + +1. **Read the target file(s)** specified in the argument. + +2. **Identify potential bugs**: null references, off-by-one errors, race conditions, unhandled edge cases, resource leaks, incorrect state transitions. + +3. **For each potential bug**, generate a bug report using the template above, with the likely trigger scenario and recommended fix filled in. + +--- + +## Phase 2C: Verify Mode + +Read `production/qa/bugs/[BUG-ID].md`. Extract the reproduction steps and expected result. + +1. **Re-run reproduction steps** — use Grep/Glob to check whether the root cause code path still exists as described. If the fix removed or changed it, note the change. +2. **Run the related test** — if the bug's system has a test file in `tests/`, run it via Bash and report pass/fail. +3. **Check for regression** — grep the codebase for any new occurrence of the pattern that caused the bug. + +Produce a verification verdict: + +- **VERIFIED FIXED** — reproduction steps no longer produce the bug; related tests pass +- **STILL PRESENT** — bug reproduces as described; fix did not resolve the issue +- **CANNOT VERIFY** — automated checks inconclusive; manual playtest required + +Ask: "May I update `production/qa/bugs/[BUG-ID].md` to set Status: Verified Fixed / Still Present / Cannot Verify?" 
+ +If STILL PRESENT: reopen the bug, set Status back to Open, and suggest re-running `/hotfix [BUG-ID]`. + +--- + +## Phase 2D: Close Mode + +Read `production/qa/bugs/[BUG-ID].md`. Confirm Status is `Verified Fixed` before closing. If status is anything else, stop: "Bug [ID] must be Verified Fixed before it can be closed. Run `/bug-report verify [BUG-ID]` first." + +Append a closure record to the bug file: + +```markdown +## Closure Record +**Closed**: [date] +**Resolution**: Fixed — [one-line description of what was changed] +**Fix commit / PR**: [if known] +**Verified by**: qa-tester +**Closed by**: [user] +**Regression test**: [test file path, or "Manual verification"] +**Status**: Closed +``` + +Update the top-level `**Status**: Open` field to `**Status**: Closed`. + +Ask: "May I update `production/qa/bugs/[BUG-ID].md` to mark it Closed?" + +After closing, check `production/qa/bug-triage-*.md` — if the bug appears in an open triage report, note: "Bug [ID] is referenced in the triage report. Run `/bug-triage` to refresh the open bug count." + +--- + +## Phase 3: Save Report + +Present the completed bug report(s) to the user. + +Ask: "May I write this to `production/qa/bugs/BUG-[NNNN].md`?" + +If yes, write the file, creating the directory if needed. Verdict: **COMPLETE** — bug report filed. + +If no, stop here. Verdict: **BLOCKED** — user declined write. 
+ +--- + +## Phase 4: Next Steps + +After saving, suggest based on mode: + +**After filing (Description/Analyze mode):** +- Run `/bug-triage` to prioritize alongside existing open bugs +- If S1 or S2: run `/hotfix [BUG-ID]` for emergency fix workflow + +**After fixing the bug (developer confirms fix is in):** +- Run `/bug-report verify [BUG-ID]` — confirm the fix actually works before closing +- Never mark a bug closed without verification — a fix that doesn't verify is still Open + +**After verify returns VERIFIED FIXED:** +- Run `/bug-report close [BUG-ID]` — write the closure record and update status +- Run `/bug-triage` to refresh the open bug count and remove it from the active list diff --git a/.omc/skills/bug-triage/SKILL.md b/.omc/skills/bug-triage/SKILL.md new file mode 100644 index 0000000..cbed2b4 --- /dev/null +++ b/.omc/skills/bug-triage/SKILL.md @@ -0,0 +1,243 @@ +--- +name: bug-triage +description: "Read all open bugs in production/qa/bugs/, re-evaluate priority vs. severity, assign to sprints, surface systemic trends, and produce a triage report. Run at sprint start or when the bug count grows enough to need re-prioritization." +argument-hint: "[sprint | full | trend]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit +--- + +# Bug Triage + +This skill processes the open bug backlog into a prioritised, sprint-assigned +action list. It distinguishes between **severity** (how bad is the impact?) and +**priority** (how urgently must we fix it?), detects systemic trends, and +ensures no critical bug is lost between sprints. + +**Output:** `production/qa/bug-triage-[date].md` + +**When to run:** +- Sprint start — assign open bugs to the new sprint or backlog +- After `/team-qa` completes and new bugs have been filed +- When the bug count crosses 10+ open items + +--- + +## 1. 
Parse Arguments + +**Modes:** +- `/bug-triage sprint` — triage against the current sprint; assign fixable bugs + to the sprint backlog; defer the rest +- `/bug-triage full` — full triage of all bugs regardless of sprint scope +- `/bug-triage trend` — trend analysis only (no assignment); read-only report +- No argument — run sprint mode if a current sprint exists, else full mode + +--- + +## 2. Load Bug Backlog + +### Step 2a — Discover bug files + +Glob for bug reports in priority order: +1. `production/qa/bugs/*.md` — individual bug report files (preferred format) +2. `production/qa/bugs.md` — single consolidated bug log (fallback) +3. Any `production/qa/qa-plan-*.md` "Bugs Found" table (last resort) + +If no bug files found: +> "No bug files found in `production/qa/bugs/`. If bugs are tracked in a +> different location, adjust the glob pattern. If no bugs exist yet, there is +> nothing to triage." + +Stop and report. Do not proceed if no bugs exist. + +### Step 2b — Load sprint context + +Read the most recently modified file in `production/sprints/` to understand: +- Current sprint number / name +- Stories in scope (for assignment target) +- Sprint capacity constraints (if noted) + +If no sprint file exists: note "No sprint plan found — assigning to backlog only." + +### Step 2c — Load severity reference + +Read `.claude/docs/coding-standards.md` for severity/priority definitions if they +exist. If they do not exist, use the standard definitions in Step 3. + +--- + +## 3. Classify Each Bug + +For each bug, extract or infer: + +### Severity (impact of the bug) + +| Severity | Definition | +|----------|-----------| +| **S1 — Critical** | Game crashes, data loss, or complete feature failure. Cannot proceed past this point. | +| **S2 — High** | Major feature broken but game is still playable. Significant wrong behaviour. | +| **S3 — Medium** | Feature degraded but a workaround exists. Minor wrong behaviour. | +| **S4 — Low** | Visual glitch, cosmetic issue, typo. 
No gameplay impact. | + +### Priority (urgency of the fix) + +| Priority | Definition | +|----------|-----------| +| **P1 — Fix this sprint** | Blocks QA, blocks release, or is regression from last sprint | +| **P2 — Fix soon** | Should be resolved before the next major milestone | +| **P3 — Backlog** | Would be good to fix, but no active blocking impact | +| **P4 — Won't fix / Deferred** | Accepted risk or out of scope for current product scope | + +### Assignment + +For each P1/P2 bug in `sprint` mode: +- Identify which story or epic the fix belongs to +- Check whether the current sprint has remaining capacity +- If capacity exists: assign to sprint (`Sprint: [current]`) +- If capacity is full: flag as `Priority overflow — consider pulling from sprint` + +For `full` mode: assign all P1 to current sprint, P2 to next sprint estimate, +P3+ to backlog. + +### Deviation check + +Flag bugs that suggest **systematic problems**: +- 3+ bugs from the same system in the same sprint → "Potential design or + implementation quality issue in [system]" +- 2+ S1/S2 bugs in the same story → "Story may need to be reopened and + re-reviewed before shipping" +- Bug filed against a story marked Complete → "Regression in completed story — + story should be re-opened in sprint tracking" + +--- + +## 4. Trend Analysis + +After classifying all bugs, generate trend metrics: + +### Volume trends +- Total open bugs: [N] +- Opened this sprint: [N] +- Closed this sprint: [N] +- Net change: [+N / -N] + +### System hot spots +- Which system has the most open bugs? +- Which system has the highest S1/S2 ratio? + +### Age analysis +- How many bugs are older than 2 sprints? +- Are any S1/S2 bugs un-assigned (sprint = none)? + +### Regression indicator +- Any bugs filed against previously-completed stories? +- Count: [N] regression bugs (story reopened implied) + +--- + +## 5. 
Generate Triage Report + +```markdown +# Bug Triage Report + +> **Date**: [date] +> **Mode**: [sprint | full | trend] +> **Generated by**: /bug-triage +> **Open bugs processed**: [N] +> **Sprint in scope**: [sprint name, or "N/A"] + +--- + +## Triage Summary + +| Priority | Count | Notes | +|----------|-------|-------| +| P1 — Fix this sprint | [N] | [N] assigned to sprint, [N] overflow | +| P2 — Fix soon | [N] | Scheduled for next sprint | +| P3 — Backlog | [N] | Deferred | +| P4 — Won't fix | [N] | Accepted risk | + +**Critical (S1/S2) unfixed count**: [N] + +--- + +## P1 Bugs — Fix This Sprint + +| ID | System | Severity | Summary | Assigned to | Story | +|----|--------|----------|---------|-------------|-------| +| BUG-NNN | [system] | S[1-4] | [one-line description] | [sprint] | [story path] | + +--- + +## P2 Bugs — Fix Soon + +| ID | System | Severity | Summary | Target Sprint | +|----|--------|----------|---------|---------------| +| BUG-NNN | [system] | S[1-4] | [one-line description] | Sprint [N+1] | + +--- + +## P3/P4 Bugs — Backlog / Won't Fix + +| ID | System | Severity | Summary | Disposition | +|----|--------|----------|---------|-------------| +| BUG-NNN | [system] | S4 | [one-line description] | Backlog | + +--- + +## Systemic Issues Flagged + +[List any patterns from Step 3 deviation check, or "None identified."] + +--- + +## Trend Analysis + +**Volume**: [N] open / [+N] net change this sprint +**Hot spot**: [system with most bugs] +**Regressions**: [N] bugs against completed stories +**Aged bugs (>2 sprints old)**: [N] + +[If N aged S1/S2 bugs > 0:] +> ⚠️ [N] high-severity bugs have been open for more than 2 sprints without +> assignment. These represent accepted risk that should be explicitly reviewed. + +--- + +## Recommended Actions + +1. [Most urgent action — usually "fix P1 bugs before QA hand-off"] +2. [Second action — usually "investigate [hot spot system] quality"] +3. [Third action — optional improvement] +``` + +--- + +## 6. 
Write and Gate + +Present the report in conversation, then ask: + +"May I write this triage report to `production/qa/bug-triage-[date].md`?" + +Write only after approval. + +After writing: +- If any S1 bugs are unassigned: "S1 bugs must be assigned before the sprint + can be considered healthy. Run `/sprint-status` to see current capacity." +- If regression bugs exist: "Regressions found — consider re-opening the + affected stories in sprint tracking and running `/smoke-check` to re-gate." +- If no P1 bugs exist: "No P1 bugs — build is in good shape for QA hand-off." Verdict: **COMPLETE** — triage report written. + +If user declined write: Verdict: **BLOCKED** — user declined write. + +--- + +## Collaborative Protocol + +- **Never close or mark bugs Won't Fix without user approval** — surface them + as P4 candidates and ask: "Are these acceptable as Won't Fix?" +- **Never auto-assign to a sprint at capacity** — flag overflow and let the + sprint owner decide what to pull +- **Severity is objective; priority is a team decision** — present severity + classifications as recommendations, not mandates +- **Trend data is informational** — do not block work on trend findings alone; + surface them as observations diff --git a/.omc/skills/changelog/SKILL.md b/.omc/skills/changelog/SKILL.md new file mode 100644 index 0000000..4e618cb --- /dev/null +++ b/.omc/skills/changelog/SKILL.md @@ -0,0 +1,177 @@ +--- +name: changelog +description: "Auto-generates a changelog from git commits, sprint data, and design documents. Produces both internal and player-facing versions." +argument-hint: "[version|sprint-number]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write +context: | + !git log --oneline -30 2>/dev/null + !git tag --list --sort=-v:refname 2>/dev/null | head -5 +model: haiku +--- + +## Phase 1: Parse Arguments + +Read the argument for the target version or sprint number. If a version is given, use the corresponding git tag. 
If a sprint number is given, use the sprint date range. + +Verify the repository is initialized: run `git rev-parse --is-inside-work-tree` to confirm git is available. If not a git repo, inform the user and abort gracefully. + +--- + +## Phase 2: Gather Change Data + +Read the git log since the last tag or release: + +``` +git log --oneline [last-tag]..HEAD +``` + +If no tags exist, read the full log or a reasonable recent range (last 100 commits). + +Read sprint reports from `production/sprints/` for the relevant period to understand planned work and context behind changes. + +Read completed design documents from `design/gdd/` for any new features implemented during this period. + +--- + +## Phase 3: Categorize Changes + +Categorize every change into one of these categories: + +- **New Features**: Entirely new gameplay systems, modes, or content +- **Improvements**: Enhancements to existing features, UX improvements, performance gains +- **Bug Fixes**: Corrections to broken behavior +- **Balance Changes**: Tuning of gameplay values, difficulty, economy +- **Known Issues**: Issues the team is aware of but have not yet resolved +- **Miscellaneous**: Changes that do not fit the above categories, or commits whose messages are too vague to classify confidently + +For each commit, check whether the message contains a task ID or story reference +(e.g. `[STORY-123]`, `TR-`, `#NNN`, or similar). Count commits that lack any task reference +and include this count in the Phase 4 Metrics section as: `Commits without task reference: [N]`. 
+ +--- + +## Phase 4: Generate Internal Changelog + +```markdown +# Internal Changelog: [Version] +Date: [Date] +Sprint(s): [Sprint numbers covered] +Commits: [Count] ([first-hash]..[last-hash]) + +## New Features +- [Feature Name] -- [Technical description, affected systems] + - Commits: [hash1], [hash2] + - Owner: [who implemented it] + - Design doc: [link if applicable] + +## Improvements +- [Improvement] -- [What changed technically and why] + - Commits: [hashes] + - Owner: [who] + +## Bug Fixes +- [BUG-ID] [Description of bug and root cause] + - Fix: [What was changed] + - Commits: [hashes] + - Owner: [who] + +## Balance Changes +- [What was tuned] -- [Old value -> New value] -- [Design intent] + - Owner: [who] + +## Technical Debt / Refactoring +- [What was cleaned up and why] + - Commits: [hashes] + +## Miscellaneous +- [Change that didn't fit other categories, or vague commit message] + - Commits: [hashes] + +## Known Issues +- [Issue description] -- [Severity] -- [ETA for fix if known] + +## Metrics +- Total commits: [N] +- Files changed: [N] +- Lines added: [N] +- Lines removed: [N] +- Commits without task reference: [N] +``` + +--- + +## Phase 5: Generate Player-Facing Changelog + +```markdown +# What is New in [Version] + +## New Features +- **[Feature Name]**: [Player-friendly description of what they can now do + and why it is exciting. Focus on the experience, not the implementation.] + +## Improvements +- **[What improved]**: [How this makes the game better for the player. + Be specific but avoid jargon.] + +## Bug Fixes +- Fixed an issue where [describe what the player experienced, not what was + wrong in the code] +- Fixed [player-visible symptom] + +## Balance Changes +- [What changed in player-understandable terms and the design intent. 
+  Example: "Healing potions now restore 50 HP (up from 30) -- we felt
+  players needed more recovery options in late-game encounters."]
+
+## Known Issues
+- We are aware of [issue description in player terms] and are working on a
+  fix. [Workaround if one exists.]
+
+---
+Thank you for playing! Your feedback helps us make the game better.
+Report issues at [link].
+```
+
+---
+
+## Phase 6: Output
+
+Output both changelogs to the user. The internal changelog is the primary working document. The player-facing changelog is ready for community posting after review.
+
+---
+
+## Phase 7: Offer File Write
+
+After presenting the changelogs, ask the user:
+
+> "May I write this changelog to `docs/CHANGELOG.md`?
+> [A] Yes, append this entry (recommended if the file already exists)
+> [B] Yes, overwrite the file entirely
+> [C] No — I'll copy it manually"
+
+- Check whether `docs/CHANGELOG.md` exists before asking. If it does, default the
+  recommendation to **[A] append**.
+- If the user selects [A]: append the new internal changelog entry to the top of
+  the existing file (newest entries first).
+- If the user selects [B]: overwrite the file with the new changelog.
+- If the user selects [C]: stop here without writing.
+
+After a successful write: Verdict: **CHANGELOG WRITTEN** — changelog saved to `docs/CHANGELOG.md`.
+If the user declines: Verdict: **COMPLETE** — changelog generated.
+
+---
+
+## Phase 8: Next Steps
+
+- Use `/patch-notes [version]` to generate a styled, saved version for public release.
+- Use `/release-checklist` before publishing the changelog externally.
+ +### Guidelines + +- Never expose internal code references, file paths, or developer names in the player-facing changelog +- Group related changes together rather than listing individual commits +- If a commit message is unclear, check the associated files and sprint data for context +- Balance changes should always include the design reasoning, not just the numbers +- Known issues should be honest — players appreciate transparency +- If the git history is messy (merge commits, reverts, fixup commits), clean up the narrative rather than listing every commit literally diff --git a/.omc/skills/code-review/SKILL.md b/.omc/skills/code-review/SKILL.md new file mode 100644 index 0000000..e1f8733 --- /dev/null +++ b/.omc/skills/code-review/SKILL.md @@ -0,0 +1,166 @@ +--- +name: code-review +description: "Performs an architectural and quality code review on a specified file or set of files. Checks for coding standard compliance, architectural pattern adherence, SOLID principles, testability, and performance concerns." +argument-hint: "[path-to-file-or-directory]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Task +agent: lead-programmer +--- + +## Phase 1: Load Target Files + +Read the target file(s) in full. Read CLAUDE.md for project coding standards. + +--- + +## Phase 2: Identify Engine Specialists + +Read `.claude/docs/technical-preferences.md`, section `## Engine Specialists`. Note: + +- The **Primary** specialist (used for architecture and broad engine concerns) +- The **Language/Code Specialist** (used when reviewing the project's primary language files) +- The **Shader Specialist** (used when reviewing shader files) +- The **UI Specialist** (used when reviewing UI code) + +If the section reads `[TO BE CONFIGURED]`, no engine is pinned — skip engine specialist steps. + +--- + +## Phase 3: ADR Compliance Check + +Search for ADR references in the story file, commit messages, and header comments. 
Look for patterns like `ADR-NNN` or `docs/architecture/ADR-`. + +If no ADR references found, note: "No ADR references found — skipping ADR compliance check." + +For each referenced ADR: read the file, extract the **Decision** and **Consequences** sections, then classify any deviation: + +- **ARCHITECTURAL VIOLATION** (BLOCKING): Uses a pattern explicitly rejected in the ADR +- **ADR DRIFT** (WARNING): Meaningfully diverges from the chosen approach without using a forbidden pattern +- **MINOR DEVIATION** (INFO): Small difference from ADR guidance that doesn't affect overall architecture + +--- + +## Phase 4: Standards Compliance + +Identify the system category (engine, gameplay, AI, networking, UI, tools) and evaluate: + +- [ ] Public methods and classes have doc comments +- [ ] Cyclomatic complexity under 10 per method +- [ ] No method exceeds 40 lines (excluding data declarations) +- [ ] Dependencies are injected (no static singletons for game state) +- [ ] Configuration values loaded from data files +- [ ] Systems expose interfaces (not concrete class dependencies) + +--- + +## Phase 5: Architecture and SOLID + +**Architecture:** +- [ ] Correct dependency direction (engine <- gameplay, not reverse) +- [ ] No circular dependencies between modules +- [ ] Proper layer separation (UI does not own game state) +- [ ] Events/signals used for cross-system communication +- [ ] Consistent with established patterns in the codebase + +**SOLID:** +- [ ] Single Responsibility: Each class has one reason to change +- [ ] Open/Closed: Extendable without modification +- [ ] Liskov Substitution: Subtypes substitutable for base types +- [ ] Interface Segregation: No fat interfaces +- [ ] Dependency Inversion: Depends on abstractions, not concretions + +--- + +## Phase 6: Game-Specific Concerns + +- [ ] Frame-rate independence (delta time usage) +- [ ] No allocations in hot paths (update loops) +- [ ] Proper null/empty state handling +- [ ] Thread safety where required +- [ ] 
Resource cleanup (no leaks) + +--- + +## Phase 7: Specialist Reviews (Parallel) + +Spawn all applicable specialists simultaneously via Task — do not wait for one before starting the next. + +### Engine Specialists + +If an engine is configured, determine which specialist applies to each file and spawn in parallel: + +- Primary language files (`.gd`, `.cs`, `.cpp`) → Language/Code Specialist +- Shader files (`.gdshader`, `.hlsl`, shader graph) → Shader Specialist +- UI screen/widget code → UI Specialist +- Cross-cutting or unclear → Primary Specialist + +Also spawn the **Primary Specialist** for any file touching engine architecture (scene structure, node hierarchy, lifecycle hooks). + +### QA Testability Review + +For Logic and Integration stories, also spawn `qa-tester` via Task in parallel with the engine specialists. Pass: +- The implementation files being reviewed +- The story's `## QA Test Cases` section (the pre-written test specs from qa-lead) +- The story's `## Acceptance Criteria` + +Ask the qa-tester to evaluate: +- [ ] Are all test hooks and interfaces exposed (not hidden behind private/internal access)? +- [ ] Do the QA test cases from the story's `## QA Test Cases` section map to testable code paths? +- [ ] Are any acceptance criteria untestable as implemented (e.g., hardcoded values, no seam for injection)? +- [ ] Does the implementation introduce any new edge cases not covered by the existing QA test cases? +- [ ] Are there any observable side effects that should have a test but don't? + +For Visual/Feel and UI stories: qa-tester reviews whether the manual verification steps in `## QA Test Cases` are achievable with the implementation as written — e.g., "is the state the manual checker needs to reach actually reachable?" + +Collect all specialist findings before producing output. 
+ +--- + +## Phase 8: Output Review + +``` +## Code Review: [File/System Name] + +### Engine Specialist Findings: [N/A — no engine configured / CLEAN / ISSUES FOUND] +[Findings from engine specialist(s), or "No engine configured." if skipped] + +### Testability: [N/A — Visual/Feel or Config story / TESTABLE / GAPS / BLOCKING] +[qa-tester findings: test hooks, coverage gaps, untestable paths, new edge cases] +[If BLOCKING: implementation must expose [X] before tests in ## QA Test Cases can run] + +### ADR Compliance: [NO ADRS FOUND / COMPLIANT / DRIFT / VIOLATION] +[List each ADR checked, result, and any deviations with severity] + +### Standards Compliance: [X/6 passing] +[List failures with line references] + +### Architecture: [CLEAN / MINOR ISSUES / VIOLATIONS FOUND] +[List specific architectural concerns] + +### SOLID: [COMPLIANT / ISSUES FOUND] +[List specific violations] + +### Game-Specific Concerns +[List game development specific issues] + +### Positive Observations +[What is done well -- always include this section] + +### Required Changes +[Must-fix items before approval — ARCHITECTURAL VIOLATIONs always appear here] + +### Suggestions +[Nice-to-have improvements] + +### Verdict: [APPROVED / APPROVED WITH SUGGESTIONS / CHANGES REQUIRED] +``` + +This skill is read-only — no files are written. + +--- + +## Phase 9: Next Steps + +- If verdict is APPROVED: run `/story-done [story-path]` to close the story. +- If verdict is CHANGES REQUIRED: fix the issues and re-run `/code-review`. +- If an ARCHITECTURAL VIOLATION is found: run `/architecture-decision` to record the correct approach. 
diff --git a/.omc/skills/consistency-check/SKILL.md b/.omc/skills/consistency-check/SKILL.md new file mode 100644 index 0000000..a7f60a7 --- /dev/null +++ b/.omc/skills/consistency-check/SKILL.md @@ -0,0 +1,275 @@ +--- +name: consistency-check +description: "Scan all GDDs against the entity registry to detect cross-document inconsistencies: same entity with different stats, same item with different values, same formula with different variables. Grep-first approach — reads registry then targets only conflicting GDD sections rather than full document reads." +argument-hint: "[full | since-last-review | entity: | item:]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash +--- + +# Consistency Check + +Detects cross-document inconsistencies by comparing all GDDs against the +entity registry (`design/registry/entities.yaml`). Uses a grep-first approach: +reads the registry once, then targets only the GDD sections that mention +registered names — no full document reads unless a conflict needs investigation. + +**This skill is the write-time safety net.** It catches what `/design-system`'s +per-section checks may have missed and what `/review-all-gdds`'s holistic review +catches too late. 
+ +**When to run:** +- After writing each new GDD (before moving to the next system) +- Before `/review-all-gdds` (so that skill starts with a clean baseline) +- Before `/create-architecture` (inconsistencies poison downstream ADRs) +- On demand: `/consistency-check entity:[name]` to check one entity specifically + +**Output:** Conflict report + optional registry corrections + +--- + +## Phase 1: Parse Arguments and Load Registry + +**Modes:** +- No argument / `full` — check all registered entries against all GDDs +- `since-last-review` — check only GDDs modified since the last review report +- `entity:` — check one specific entity across all GDDs +- `item:` — check one specific item across all GDDs + +**Load the registry:** + +``` +Read path="design/registry/entities.yaml" +``` + +If the file does not exist or has no entries: +> "Entity registry is empty. Run `/design-system` to write GDDs — the registry +> is populated automatically after each GDD is completed. Nothing to check yet." + +Stop and exit. + +Build four lookup tables from the registry: +- **entity_map**: `{ name → { source, attributes, referenced_by } }` +- **item_map**: `{ name → { source, value_gold, weight, ... } }` +- **formula_map**: `{ name → { source, variables, output_range } }` +- **constant_map**: `{ name → { source, value, unit } }` + +Count total registered entries. Report: +``` +Registry loaded: [N] entities, [N] items, [N] formulas, [N] constants +Scope: [full | since-last-review | entity:name] +``` + +--- + +## Phase 2: Locate In-Scope GDDs + +``` +Glob pattern="design/gdd/*.md" +``` + +Exclude: `game-concept.md`, `systems-index.md`, `game-pillars.md` — these are +not system GDDs. + +For `since-last-review` mode: +```bash +git log --name-only --pretty=format: -- design/gdd/ | grep "\.md$" | sort -u +``` +Limit to GDDs modified since the most recent `design/gdd/gdd-cross-review-*.md` +file's creation date. + +Report the in-scope GDD list before scanning. 
+ +--- + +## Phase 3: Grep-First Conflict Scan + +For each registered entry, grep every in-scope GDD for the entry's name. +Do NOT do full reads — extract only the matching lines and their immediate +context (-C 3 lines). + +This is the core optimization: instead of reading 10 GDDs × 400 lines each +(4,000 lines), you grep 50 entity names × 10 GDDs (50 targeted searches, +each returning ~10 lines on a hit). + +### 3a: Entity Scan + +For each entity in entity_map: + +``` +Grep pattern="[entity_name]" glob="design/gdd/*.md" output_mode="content" -C 3 +``` + +For each GDD hit, extract the values mentioned near the entity name: +- any numeric attributes (counts, costs, durations, ranges, rates) +- any categorical attributes (types, tiers, categories) +- any derived values (totals, outputs, results) +- any other attributes registered in entity_map + +Compare extracted values against the registry entry. + +**Conflict detection:** +- Registry says `[entity_name].[attribute] = [value_A]`. GDD says `[entity_name] has [value_B]`. → **CONFLICT** +- Registry says `[item_name].[attribute] = [value_A]`. GDD says `[item_name] is [value_B]`. → **CONFLICT** +- GDD mentions `[entity_name]` but doesn't specify the attribute. → **NOTE** (no conflict, just unverifiable) + +### 3b: Item Scan + +For each item in item_map, grep all GDDs for the item name. Extract: +- sell price / value / gold value +- weight +- stack rules (stackable / non-stackable) +- category + +Compare against registry entry values. + +### 3c: Formula Scan + +For each formula in formula_map, grep all GDDs for the formula name. Extract: +- variable names mentioned near the formula +- output range or cap values mentioned + +Compare against registry entry: +- Different variable names → **CONFLICT** +- Output range stated differently → **CONFLICT** + +### 3d: Constant Scan + +For each constant in constant_map, grep all GDDs for the constant name. 
Extract: +- Any numeric value mentioned near the constant name + +Compare against registry value: +- Different number → **CONFLICT** + +--- + +## Phase 4: Deep Investigation (Conflicts Only) + +For each conflict found in Phase 3, do a targeted full-section read of the +conflicting GDD to get precise context: + +``` +Read path="design/gdd/[conflicting_gdd].md" +``` +(Or use Grep with wider context if the file is large) + +Confirm the conflict with full context. Determine: +1. **Which GDD is correct?** Check the `source:` field in the registry — the + source GDD is the authoritative owner. Any other GDD that contradicts it + is the one that needs updating. +2. **Is the registry itself out of date?** If the source GDD was updated after + the registry entry was written (check git log), the registry may be stale. +3. **Is this a genuine design change?** If the conflict represents an intentional + design decision, the resolution is: update the source GDD, update the registry, + then fix all other GDDs. + +For each conflict, classify: +- **🔴 CONFLICT** — same named entity/item/formula/constant with different values + in different GDDs. Must resolve before architecture begins. +- **⚠️ STALE REGISTRY** — source GDD value changed but registry not updated. + Registry needs updating; other GDDs may be correct already. +- **ℹ️ UNVERIFIABLE** — entity mentioned but no comparable attribute stated. + Not a conflict; just noting the reference. 
+ +--- + +## Phase 5: Output Report + +``` +## Consistency Check Report +Date: [date] +Registry entries checked: [N entities, N items, N formulas, N constants] +GDDs scanned: [N] ([list names]) + +--- + +### Conflicts Found (must resolve before architecture) + +🔴 [Entity/Item/Formula/Constant Name] + Registry (source: [gdd]): [attribute] = [value] + Conflict in [other_gdd].md: [attribute] = [different_value] + → Resolution needed: [which doc to change and to what] + +--- + +### Stale Registry Entries (registry behind the GDD) + +⚠️ [Entry Name] + Registry says: [value] (written [date]) + Source GDD now says: [new value] + → Update registry entry to match source GDD, then check referenced_by docs. + +--- + +### Unverifiable References (no conflict, informational) + +ℹ️ [gdd].md mentions [entity_name] but states no comparable attributes. + No conflict detected. No action required. + +--- + +### Clean Entries (no issues found) + +✅ [N] registry entries verified across all GDDs with no conflicts. + +--- + +Verdict: PASS | CONFLICTS FOUND +``` + +**Verdict:** +- **PASS** — no conflicts. Registry and GDDs agree on all checked values. +- **CONFLICTS FOUND** — one or more conflicts detected. List resolution steps. + +--- + +## Phase 6: Registry Corrections + +If stale registry entries were found, ask: +> "May I update `design/registry/entities.yaml` to fix the [N] stale entries?" + +For each stale entry: +- Update the `value` / attribute field +- Set `revised:` to today's date +- Add a YAML comment with the old value: `# was: [old_value] before [date]` + +If new entries were found in GDDs that are not in the registry, ask: +> "Found [N] entities/items mentioned in GDDs that aren't in the registry yet. +> May I add them to `design/registry/entities.yaml`?" + +Only add entries that appear in more than one GDD (true cross-system facts). + +**Never delete registry entries.** Set `status: deprecated` if an entry is removed +from all GDDs. 
+ +After writing: Verdict: **COMPLETE** — consistency check finished. +If conflicts remain unresolved: Verdict: **BLOCKED** — [N] conflicts need manual resolution before architecture begins. + +### 6b: Append to Reflexion Log + +If any 🔴 CONFLICT entries were found (regardless of whether they were resolved), +append an entry to `docs/consistency-failures.md` for each conflict: + +```markdown +### [YYYY-MM-DD] — /consistency-check — 🔴 CONFLICT +**Domain**: [system domain(s) involved] +**Documents involved**: [source GDD] vs [conflicting GDD] +**What happened**: [specific conflict — entity name, attribute, differing values] +**Resolution**: [how it was fixed, or "Unresolved — manual action needed"] +**Pattern**: [generalised lesson, e.g. "Item values defined in combat GDD were not +referenced in economy GDD before authoring — always check entities.yaml first"] +``` + +Only append if `docs/consistency-failures.md` exists. If the file is missing, +skip this step silently — do not create the file from this skill. + +--- + +## Next Steps + +- **If PASS**: Run `/review-all-gdds` for holistic design-theory review, or + `/create-architecture` if all MVP GDDs are complete. +- **If CONFLICTS FOUND**: Fix the flagged GDDs, then re-run + `/consistency-check` to confirm resolution. +- **If STALE REGISTRY**: Update the registry (Phase 6), then re-run to verify. +- Run `/consistency-check` after writing each new GDD to catch issues early, + not at architecture time. diff --git a/.omc/skills/content-audit/SKILL.md b/.omc/skills/content-audit/SKILL.md new file mode 100644 index 0000000..a62b4d8 --- /dev/null +++ b/.omc/skills/content-audit/SKILL.md @@ -0,0 +1,204 @@ +--- +name: content-audit +description: "Audit GDD-specified content counts against implemented content. Identifies what's planned vs built." 
+argument-hint: "[system-name | --summary | (no arg = full audit)]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +agent: producer +--- + +When this skill is invoked: + +Parse the argument: +- No argument → full audit across all systems +- `[system-name]` → audit that single system only +- `--summary` → summary table only, no file write + +--- + +## Phase 1 — Context Gathering + +1. **Read `design/gdd/systems-index.md`** for the full list of systems, their + categories, and MVP/priority tier. + +2. **L0 pre-scan**: Before full-reading any GDDs, Grep all GDD files for + `## Summary` sections plus common content-count keywords: + ``` + Grep pattern="(## Summary|N enemies|N levels|N items|N abilities|enemy types|item types)" glob="design/gdd/*.md" output_mode="files_with_matches" + ``` + For a single-system audit: skip this step and go straight to full-read. + For a full audit: full-read only the GDDs that matched content-count keywords. + GDDs with no content-count language (pure mechanics GDDs) are noted as + "No auditable content counts" without a full read. + +3. **Full-read in-scope GDD files** (or the single system GDD if a system + name was given). + +4. **For each GDD, extract explicit content counts or lists.** Look for patterns + like: + - "N enemies" / "enemy types:" / list of named enemies + - "N levels" / "N areas" / "N maps" / "N stages" + - "N items" / "N weapons" / "N equipment pieces" + - "N abilities" / "N skills" / "N spells" + - "N dialogue scenes" / "N conversations" / "N cutscenes" + - "N quests" / "N missions" / "N objectives" + - Any explicit enumerated list (bullet list of named content pieces) + +4. 
**Build a content inventory table** from the extracted data: + + | System | Content Type | Specified Count/List | Source GDD | + |--------|-------------|---------------------|------------| + + Note: If a GDD describes content qualitatively but gives no count, record + "Unspecified" and flag it — unspecified counts are a design gap worth noting. + +--- + +## Phase 2 — Implementation Scan + +For each content type found in Phase 1, scan the relevant directories to count +what has been implemented. Use Glob and Grep to locate files. + +**Levels / Areas / Maps:** +- Glob `assets/**/*.tscn`, `assets/**/*.unity`, `assets/**/*.umap` +- Glob `src/**/*.tscn`, `src/**/*.unity` +- Look for scene files in subdirectories named `levels/`, `areas/`, `maps/`, + `worlds/`, `stages/` +- Count unique files that appear to be level/scene definitions (not UI scenes) + +**Enemies / Characters / NPCs:** +- Glob `assets/data/**/enemies/**`, `assets/data/**/characters/**` +- Glob `src/**/enemies/**`, `src/**/characters/**` +- Look for `.json`, `.tres`, `.asset`, `.yaml` data files defining entity stats +- Look for scene/prefab files in character subdirectories + +**Items / Equipment / Loot:** +- Glob `assets/data/**/items/**`, `assets/data/**/equipment/**`, + `assets/data/**/loot/**` +- Look for `.json`, `.tres`, `.asset` data files + +**Abilities / Skills / Spells:** +- Glob `assets/data/**/abilities/**`, `assets/data/**/skills/**`, + `assets/data/**/spells/**` +- Look for `.json`, `.tres`, `.asset` data files + +**Dialogue / Conversations / Cutscenes:** +- Glob `assets/**/*.dialogue`, `assets/**/*.csv`, `assets/**/*.ink` +- Grep for dialogue data files in `assets/data/` + +**Quests / Missions:** +- Glob `assets/data/**/quests/**`, `assets/data/**/missions/**` +- Look for `.json`, `.yaml` definition files + +**Engine-specific notes (acknowledge in the report):** +- Counts are approximations — the skill cannot perfectly parse every engine + format or distinguish editor-only files from 
shipped content +- Scene files may include both gameplay content and system/UI scenes; the scan + counts all matches and notes this caveat + +--- + +## Phase 3 — Gap Report + +Produce the gap table: + +``` +| System | Content Type | Specified | Found | Gap | Status | +|--------|-------------|-----------|-------|-----|--------| +``` + +**Status categories:** +- `COMPLETE` — Found ≥ Specified (100%+) +- `IN PROGRESS` — Found is 50–99% of Specified +- `EARLY` — Found is 1–49% of Specified +- `NOT STARTED` — Found is 0 + +**Priority flags:** +Flag a system as `HIGH PRIORITY` in the report if: +- Status is `NOT STARTED` or `EARLY`, AND +- The system is tagged MVP or Vertical Slice in the systems index, OR +- The systems index shows the system is blocking downstream systems + +**Summary line:** +- Total content items specified (sum of all Specified column values) +- Total content items found (sum of all Found column values) +- Overall gap percentage: `(Specified - Found) / Specified * 100` + +--- + +## Phase 4 — Output + +### Full audit and single-system modes + +Present the gap table and summary to the user. Ask: "May I write the full report to `docs/content-audit-[YYYY-MM-DD].md`?" + +If yes, write the file: + +```markdown +# Content Audit — [Date] + +## Summary +- **Total specified**: [N] content items across [M] systems +- **Total found**: [N] +- **Gap**: [N] items ([X%] unimplemented) +- **Scope**: [Full audit | System: name] + +> Note: Counts are approximations based on file scanning. +> The audit cannot distinguish shipped content from editor/test assets. +> Manual verification is recommended for any HIGH PRIORITY gaps. 
+ +## Gap Table + +| System | Content Type | Specified | Found | Gap | Status | +|--------|-------------|-----------|-------|-----|--------| + +## HIGH PRIORITY Gaps + +[List systems flagged HIGH PRIORITY with rationale] + +## Per-System Breakdown + +### [System Name] +- **GDD**: `design/gdd/[file].md` +- **Content types audited**: [list] +- **Notes**: [any caveats about scan accuracy for this system] + +## Recommendation + +Focus implementation effort on: +1. [Highest-gap HIGH PRIORITY system] +2. [Second system] +3. [Third system] + +## Unspecified Content Counts + +The following GDDs describe content without giving explicit counts. +Consider adding counts to improve auditability: +[List of GDDs and content types with "Unspecified"] +``` + +After writing the report, ask: + +> "Would you like to create backlog stories for any of the content gaps?" + +If yes: for each system the user selects, suggest a story title and point them +to `/create-stories [epic-slug]` or `/quick-design` depending on the size of the gap. + +### --summary mode + +Print the Gap Table and Summary directly to conversation. Do not write a file. +End with: "Run `/content-audit` without `--summary` to write the full report." + +--- + +## Phase 5 — Next Steps + +After the audit, recommend the highest-value follow-up actions: + +- If any system is `NOT STARTED` and MVP-tagged → "Run `/design-system [name]` to + add missing content counts to the GDD before implementation begins." +- If total gap is >50% → "Run `/sprint-plan` to allocate content work across upcoming sprints." +- If backlog stories are needed → "Run `/create-stories [epic-slug]` for each HIGH PRIORITY gap." +- If `--summary` was used → "Run `/content-audit` (no flag) to write the full report to `docs/`." + +Verdict: **COMPLETE** — content audit finished. 
diff --git a/.omc/skills/create-architecture/SKILL.md b/.omc/skills/create-architecture/SKILL.md new file mode 100644 index 0000000..d33c581 --- /dev/null +++ b/.omc/skills/create-architecture/SKILL.md @@ -0,0 +1,402 @@ +--- +name: create-architecture +description: "Guided, section-by-section authoring of the master architecture document for the game. Reads all GDDs, the systems index, existing ADRs, and the engine reference library to produce a complete architecture blueprint before any code is written. Engine-version-aware: flags knowledge gaps and validates decisions against the pinned engine version." +argument-hint: "[focus-area: full | layers | data-flow | api-boundaries | adr-audit] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash, AskUserQuestion, Task +agent: technical-director +--- + +# Create Architecture + +This skill produces `docs/architecture/architecture.md` — the master architecture +document that translates all approved GDDs into a concrete technical blueprint. +It sits between design and implementation, and must exist before sprint planning begins. + +**Distinct from `/architecture-decision`**: ADRs record individual point decisions. +This skill creates the whole-system blueprint that gives ADRs their context. + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. 
+ +**Argument modes:** +- **No argument / `full`**: Full guided walkthrough — all sections, start to finish +- **`layers`**: Focus on the system layer diagram only +- **`data-flow`**: Focus on data flow between modules only +- **`api-boundaries`**: Focus on API boundary definitions only +- **`adr-audit`**: Audit existing ADRs for engine compatibility gaps only + +--- + +## Phase 0: Load All Context + +Before anything else, load the full project context in this order: + +### 0a. Engine Context (Critical) + +Read the engine reference library completely: + +1. `docs/engine-reference/[engine]/VERSION.md` + → Extract: engine name, version, LLM cutoff, post-cutoff risk levels +2. `docs/engine-reference/[engine]/breaking-changes.md` + → Extract: all HIGH and MEDIUM risk changes +3. `docs/engine-reference/[engine]/deprecated-apis.md` + → Extract: APIs to avoid +4. `docs/engine-reference/[engine]/current-best-practices.md` + → Extract: post-cutoff best practices that differ from training data +5. All files in `docs/engine-reference/[engine]/modules/` + → Extract: current API patterns per domain + +If no engine is configured, stop and prompt: +> "No engine is configured. Run `/setup-engine` first. Architecture cannot be +> written without knowing which engine and version you are targeting." + +### 0b. Design Context + Technical Requirements Extraction + +Read all approved design documents and extract technical requirements from each: + +1. `design/gdd/game-concept.md` — game pillars, genre, core loop +2. `design/gdd/systems-index.md` — all systems, dependencies, priority tiers +3. `.claude/docs/technical-preferences.md` — naming conventions, performance budgets, + allowed libraries, forbidden patterns +4. 
**Every GDD in `design/gdd/`** — for each, extract technical requirements: + - Data structures implied by the game rules + - Performance constraints stated or implied + - Engine capabilities the system requires + - Cross-system communication patterns (what talks to what, how) + - State that must persist (save/load implications) + - Threading or timing requirements + +Build a **Technical Requirements Baseline** — a flat list of all extracted +requirements across all GDDs, numbered `TR-[gdd-slug]-[NNN]`. This is the +complete set of what the architecture must cover. Present it as: + +``` +## Technical Requirements Baseline +Extracted from [N] GDDs | [X] total requirements + +| Req ID | GDD | System | Requirement | Domain | +|--------|-----|--------|-------------|--------| +| TR-combat-001 | combat.md | Combat | Hitbox detection per-frame | Physics | +| TR-combat-002 | combat.md | Combat | Combo state machine | Core | +| TR-inventory-001 | inventory.md | Inventory | Item persistence | Save/Load | +``` + +This baseline feeds into every subsequent phase. No GDD requirement should be +left without an architectural decision to support it by the end of this session. + +### 0c. Existing Architecture Decisions + +Read all files in `docs/architecture/` to understand what has already been decided. +List any ADRs found and their domains. + +### 0d. 
Generate Knowledge Gap Inventory + +Before proceeding, display a structured summary: + +``` +## Engine Knowledge Gap Inventory +Engine: [name + version] +LLM Training Covers: up to approximately [version] +Post-Cutoff Versions: [list] + +### HIGH RISK Domains (must verify against engine reference before deciding) +- [Domain]: [Key changes] + +### MEDIUM RISK Domains (verify key APIs) +- [Domain]: [Key changes] + +### LOW RISK Domains (in training data, likely reliable) +- [Domain]: [no significant post-cutoff changes] + +### Systems from GDD that touch HIGH/MEDIUM risk domains: +- [GDD system name] → [domain] → [risk level] +``` + +Ask: "This inventory identifies [N] systems in HIGH RISK engine domains. Shall I +continue building the architecture with these warnings flagged throughout?" + +--- + +## Phase 1: System Layer Mapping + +Map every system from `systems-index.md` into an architecture layer. The standard +game architecture layers are: + +``` +┌─────────────────────────────────────────────┐ +│ PRESENTATION LAYER │ ← UI, HUD, menus, VFX, audio +├─────────────────────────────────────────────┤ +│ FEATURE LAYER │ ← gameplay systems, AI, quests +├─────────────────────────────────────────────┤ +│ CORE LAYER │ ← physics, input, combat, movement +├─────────────────────────────────────────────┤ +│ FOUNDATION LAYER │ ← engine integration, save/load, +│ │ scene management, event bus +├─────────────────────────────────────────────┤ +│ PLATFORM LAYER │ ← OS, hardware, engine API surface +└─────────────────────────────────────────────┘ +``` + +For each GDD system, ask: +- Which layer does it belong to? +- What are its module boundaries? +- What does it own exclusively? (data, state, behaviour) + +Present the proposed layer assignment and ask for approval before proceeding to +the next section. Write the approved layer map immediately to the skeleton file. 
+ +**Engine awareness check**: For each system assigned to the Core and Foundation +layers, flag if it touches a HIGH or MEDIUM risk engine domain. Show the relevant +engine reference excerpt inline. + +--- + +## Phase 2: Module Ownership Map + +For each module defined in Phase 1, define ownership: + +- **Owns**: what data and state this module is solely responsible for +- **Exposes**: what other modules may read or call +- **Consumes**: what it reads from other modules +- **Engine APIs used**: which specific engine classes/nodes/signals this module + calls directly (with version and risk level noted) + +Format as a table per layer, then as an ASCII dependency diagram. + +**Engine awareness check**: For every engine API listed, verify against the +relevant module reference doc. If an API is post-cutoff, flag it: + +``` +⚠️ [ClassName.method()] — Godot 4.6 (post-cutoff, HIGH risk) + Verified against: docs/engine-reference/godot/modules/[domain].md + Behaviour confirmed: [yes / NEEDS VERIFICATION] +``` + +Get user approval on the ownership map before writing. + +--- + +## Phase 3: Data Flow + +Define how data moves between modules during key game scenarios. Cover at minimum: + +1. **Frame update path**: Input → Core systems → State → Rendering +2. **Event/signal path**: How systems communicate without tight coupling +3. **Save/load path**: What state is serialised, which module owns serialisation +4. **Initialisation order**: Which modules must boot before others + +Use ASCII sequence diagrams where helpful. For each data flow: +- Name the data being transferred +- Identify the producer and consumer +- State whether this is synchronous call, signal/event, or shared state +- Flag any data flows that cross thread boundaries + +Get user approval per scenario before writing. + +--- + +## Phase 4: API Boundaries + +Define the public contracts between modules. For each boundary: + +- What is the interface a module exposes to the rest of the system? 
+- What are the entry points (functions/signals/properties)? +- What invariants must callers respect? +- What must the module guarantee to callers? + +Write in pseudocode or the project's actual language (from technical preferences). +These become the contracts programmers implement against. + +**Engine awareness check**: If any interface uses engine-specific types (e.g. +`Node`, `Resource`, `Signal` in Godot), flag the version and verify the type +exists and has not changed signature in the target engine version. + +--- + +## Phase 5: ADR Audit + Traceability Check + +Review all existing ADRs from Phase 0c against both the architecture built in +Phases 1-4 AND the Technical Requirements Baseline from Phase 0b. + +### ADR Quality Check + +For each ADR: +- [ ] Does it have an Engine Compatibility section? +- [ ] Is the engine version recorded? +- [ ] Are post-cutoff APIs flagged? +- [ ] Does it have a "GDD Requirements Addressed" section? +- [ ] Does it conflict with the layer/ownership decisions made in this session? +- [ ] Is it still valid for the pinned engine version? + +| ADR | Engine Compat | Version | GDD Linkage | Conflicts | Valid | +|-----|--------------|---------|-------------|-----------|-------| +| ADR-0001: [title] | ✅/❌ | ✅/❌ | ✅/❌ | None/[conflict] | ✅/⚠️ | + +### Traceability Coverage Check + +Map every requirement from the Technical Requirements Baseline to existing ADRs. +For each requirement, check if any ADR's "GDD Requirements Addressed" section +or decision text covers it: + +| Req ID | Requirement | ADR Coverage | Status | +|--------|-------------|--------------|--------| +| TR-combat-001 | Hitbox detection per-frame | ADR-0003 | ✅ | +| TR-combat-002 | Combo state machine | — | ❌ GAP | + +Count: X covered, Y gaps. For each gap, it becomes a **Required New ADR**. 
+ +### Required New ADRs + +List all decisions made during this architecture session (Phases 1-4) that do +not yet have a corresponding ADR, PLUS all uncovered Technical Requirements. +Group by layer — Foundation first: + +**Foundation Layer (must create before any coding):** +- `/architecture-decision [title]` → covers: TR-[id], TR-[id] + +**Core Layer:** +- `/architecture-decision [title]` → covers: TR-[id] + +--- + +## Phase 6: Missing ADR List + +Based on the full architecture, produce a complete list of ADRs that should exist +but don't yet. Group by priority: + +**Must have before coding starts (Foundation & Core decisions):** +- [e.g. "Scene management and scene loading strategy"] +- [e.g. "Event bus vs direct signal architecture"] + +**Should have before the relevant system is built:** +- [e.g. "Inventory serialisation format"] + +**Can defer to implementation:** +- [e.g. "Specific shader technique for water"] + +--- + +## Phase 7: Write the Master Architecture Document + +Once all sections are approved, write the complete document to +`docs/architecture/architecture.md`. + +Ask: "May I write the master architecture document to `docs/architecture/architecture.md`?" 
+ +The document structure: + +```markdown +# [Game Name] — Master Architecture + +## Document Status +- Version: [N] +- Last Updated: [date] +- Engine: [name + version] +- GDDs Covered: [list] +- ADRs Referenced: [list] + +## Engine Knowledge Gap Summary +[Condensed from Phase 0d inventory — HIGH/MEDIUM risk domains and their implications] + +## System Layer Map +[From Phase 1] + +## Module Ownership +[From Phase 2] + +## Data Flow +[From Phase 3] + +## API Boundaries +[From Phase 4] + +## ADR Audit +[From Phase 5] + +## Required ADRs +[From Phase 6] + +## Architecture Principles +[3-5 key principles that govern all technical decisions for this project, +derived from the game concept, GDDs, and technical preferences] + +## Open Questions +[Decisions deferred — must be resolved before the relevant layer is built] +``` + +--- + +## Phase 7b: Technical Director Sign-Off + Lead Programmer Feasibility Review + +After writing the master architecture document, perform an explicit sign-off before handoff. + +**Step 1 — Technical Director self-review** (this skill runs as technical-director): + +Apply gate **TD-ARCHITECTURE** (`.claude/docs/director-gates.md`) as a self-review. Check all four criteria from that gate definition against the completed document. + +**Review mode check** — apply before spawning LP-FEASIBILITY: +- `solo` → skip. Note: "LP-FEASIBILITY skipped — Solo mode." Proceed to Phase 8 handoff. +- `lean` → skip (not a PHASE-GATE). Note: "LP-FEASIBILITY skipped — Lean mode." Proceed to Phase 8 handoff. +- `full` → spawn as normal. + +**Step 2 — Spawn `lead-programmer` via Task using gate LP-FEASIBILITY (`.claude/docs/director-gates.md`):** + +Pass: architecture document path, technical requirements baseline summary, ADR list. + +**Step 3 — Present both assessments to the user:** + +Show the Technical Director assessment and Lead Programmer verdict side by side. + +Use `AskUserQuestion` — "Technical Director and Lead Programmer have reviewed the architecture. 
How would you like to proceed?" +Options: `Accept — proceed to handoff` / `Revise flagged items first` / `Discuss specific concerns` + +**Step 4 — Record sign-off in the architecture document:** + +Update the Document Status section: +``` +- Technical Director Sign-Off: [date] — APPROVED / APPROVED WITH CONDITIONS +- Lead Programmer Feasibility: FEASIBLE / CONCERNS ACCEPTED / REVISED +``` + +Ask: "May I update the Document Status section in `docs/architecture/architecture.md` with the sign-off?" + +--- + +## Phase 8: Handoff + +After writing the document, provide a clear handoff: + +1. **Run these ADRs next** (from Phase 6, prioritised): list the top 3 +2. **Gate check**: "The master architecture document is complete. Run `/gate-check + pre-production` when all required ADRs are also written." +3. **Update session state**: Write a summary to `production/session-state/active.md` + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle at every phase: + +1. **Load context silently** — do not narrate file reads +2. **Present findings** — show the knowledge gap inventory and layer proposals +3. **Ask before deciding** — present options for each architectural choice +4. **Get approval before writing** — each phase section is written only after + user approves the content +5. **Incremental writing** — write each approved section immediately; do not + accumulate everything and write at the end. This survives session crashes. + +Never make a binding architectural decision without user input. If the user is +unsure, present 2-4 options with pros/cons before asking them to decide. 
+ +--- + +## Recommended Next Steps + +- Run `/architecture-decision [title]` for each required ADR listed in Phase 6 — Foundation layer ADRs first +- Run `/create-control-manifest` once the required ADRs are written to produce the layer rules manifest +- Run `/gate-check pre-production` when all required ADRs are written and the architecture is signed off diff --git a/.omc/skills/create-control-manifest/SKILL.md b/.omc/skills/create-control-manifest/SKILL.md new file mode 100644 index 0000000..a3c7881 --- /dev/null +++ b/.omc/skills/create-control-manifest/SKILL.md @@ -0,0 +1,276 @@ +--- +name: create-control-manifest +description: "After architecture is complete, produces a flat actionable rules sheet for programmers — what you must do, what you must never do, per system and per layer. Extracted from all Accepted ADRs, technical preferences, and engine reference docs. More immediately actionable than ADRs (which explain why)." +argument-hint: "[update — regenerate from current ADRs]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task +agent: technical-director +--- + +# Create Control Manifest + +The Control Manifest is a flat, actionable rules sheet for programmers. It +answers "what do I do?" and "what must I never do?" — organized by architectural +layer, extracted from all Accepted ADRs, technical preferences, and engine +reference docs. Where ADRs explain *why*, the manifest tells you *what*. + +**Output:** `docs/architecture/control-manifest.md` + +**When to run:** After `/architecture-review` passes and ADRs are in Accepted +status. Re-run whenever new ADRs are accepted or existing ADRs are revised. + +--- + +## 1. 
Load All Inputs + +### ADRs +- Glob `docs/architecture/adr-*.md` and read every file +- Filter to only Accepted ADRs (Status: Accepted) — skip Proposed, Deprecated, + Superseded +- Note the ADR number and title for every rule sourced + +### Technical Preferences +- Read `.claude/docs/technical-preferences.md` +- Extract: naming conventions, performance budgets, approved libraries/addons, + forbidden patterns + +### Engine Reference +- Read `docs/engine-reference/[engine]/VERSION.md` for engine + version +- Read `docs/engine-reference/[engine]/deprecated-apis.md` — these become + forbidden API entries +- Read `docs/engine-reference/[engine]/current-best-practices.md` if it exists + +Report: "Loaded [N] Accepted ADRs, engine: [name + version]." + +--- + +## 2. Extract Rules from Each ADR + +For each Accepted ADR, extract: + +### Required Patterns (from "Implementation Guidelines" section) +- Every "must", "should", "required to", "always" statement +- Every specific pattern or approach mandated + +### Forbidden Approaches (from "Alternatives Considered" sections) +- Every alternative that was explicitly rejected — *why* it was rejected becomes + the rule ("never use X because Y") +- Any anti-patterns explicitly called out + +### Performance Guardrails (from "Performance Implications" section) +- Budget constraints: "max N ms per frame for this system" +- Memory limits: "this system must not exceed N MB" + +### Engine API Constraints (from "Engine Compatibility" section) +- Post-cutoff APIs that require verification +- Verified behaviours that differ from default LLM assumptions +- API fields or methods that behave differently in the pinned engine version + +### Layer Classification +Classify each rule by the architectural layer of the system it governs: +- **Foundation**: Scene management, event architecture, save/load, engine init +- **Core**: Core gameplay loops, main player systems, physics/collision +- **Feature**: Secondary systems, secondary mechanics, AI +- 
**Presentation**: Rendering, audio, UI, VFX, shaders + +If an ADR spans multiple layers, duplicate the rule into each relevant layer. + +--- + +## 3. Add Global Rules + +Combine rules that apply to all layers: + +### From technical-preferences.md: +- Naming conventions (classes, variables, signals/events, files, constants) +- Performance budgets (target framerate, frame budget, draw call limits, memory ceiling) + +### From deprecated-apis.md: +- All deprecated APIs → Forbidden API entries + +### From current-best-practices.md (if available): +- Engine-recommended patterns → Required entries + +### From technical-preferences.md forbidden patterns: +- Copy any "Forbidden Patterns" entries directly + +--- + +## 4. Present Rules Summary Before Writing + +Before writing the manifest, present a summary to the user: + +``` +## Control Manifest Preview +Engine: [name + version] +ADRs covered: [list ADR numbers] +Total rules extracted: + - Foundation layer: [N] required, [M] forbidden, [P] guardrails + - Core layer: [N] required, [M] forbidden, [P] guardrails + - Feature layer: ... + - Presentation layer: ... + - Global: [N] naming conventions, [M] forbidden APIs, [P] approved libraries +``` + +Ask: "Does this look complete? Any rules to add or remove before I write the manifest?" + +--- + +## 4b. Director Gate — Technical Review + +**Review mode check** — apply before spawning TD-MANIFEST: +- `solo` → skip. Note: "TD-MANIFEST skipped — Solo mode." Proceed to Phase 5. +- `lean` → skip. Note: "TD-MANIFEST skipped — Lean mode." Proceed to Phase 5. +- `full` → spawn as normal. + +Spawn `technical-director` via Task using gate **TD-MANIFEST** (`.claude/docs/director-gates.md`). + +Pass: the Control Manifest Preview from Phase 4 (rule counts per layer, full extracted rule list), the list of ADRs covered, engine version, and any rules sourced from technical-preferences.md or engine reference docs. 
+ +The technical-director reviews whether: +- All mandatory ADR patterns are captured and accurately stated +- Forbidden approaches are complete and correctly attributed +- No rules were added that lack a source ADR or preference document +- Performance guardrails are consistent with the ADR constraints + +Apply the verdict: +- **APPROVE** → proceed to Phase 5 +- **CONCERNS** → surface via `AskUserQuestion` with options: `Revise flagged rules` / `Accept and proceed` / `Discuss further` +- **REJECT** → do not write the manifest; fix the flagged rules and re-present the summary + +--- + +## 5. Write the Control Manifest + +Ask: "May I write this to `docs/architecture/control-manifest.md`?" + +Format: + +```markdown +# Control Manifest + +> **Engine**: [name + version] +> **Last Updated**: [date] +> **Manifest Version**: [date] +> **ADRs Covered**: [ADR-NNNN, ADR-MMMM, ...] +> **Status**: [Active — regenerate with `/create-control-manifest update` when ADRs change] + +`Manifest Version` is the date this manifest was generated. Story files embed +this date when created. `/story-readiness` compares a story's embedded version +to this field to detect stories written against stale rules. Always matches +`Last Updated` — they are the same date, serving different consumers. + +This manifest is a programmer's quick-reference extracted from all Accepted ADRs, +technical preferences, and engine reference docs. For the reasoning behind each +rule, see the referenced ADR. 
+ +--- + +## Foundation Layer Rules + +*Applies to: scene management, event architecture, save/load, engine initialisation* + +### Required Patterns +- **[rule]** — source: [ADR-NNNN] +- **[rule]** — source: [ADR-NNNN] + +### Forbidden Approaches +- **Never [anti-pattern]** — [brief reason] — source: [ADR-NNNN] + +### Performance Guardrails +- **[system]**: max [N]ms/frame — source: [ADR-NNNN] + +--- + +## Core Layer Rules + +*Applies to: core gameplay loop, main player systems, physics, collision* + +### Required Patterns +... + +### Forbidden Approaches +... + +### Performance Guardrails +... + +--- + +## Feature Layer Rules + +*Applies to: secondary mechanics, AI systems, secondary features* + +### Required Patterns +... + +### Forbidden Approaches +... + +--- + +## Presentation Layer Rules + +*Applies to: rendering, audio, UI, VFX, shaders, animations* + +### Required Patterns +... + +### Forbidden Approaches +... + +--- + +## Global Rules (All Layers) + +### Naming Conventions +| Element | Convention | Example | +|---------|-----------|---------| +| Classes | [from technical-preferences] | [example] | +| Variables | [from technical-preferences] | [example] | +| Signals/Events | [from technical-preferences] | [example] | +| Files | [from technical-preferences] | [example] | +| Constants | [from technical-preferences] | [example] | + +### Performance Budgets +| Target | Value | +|--------|-------| +| Framerate | [from technical-preferences] | +| Frame budget | [from technical-preferences] | +| Draw calls | [from technical-preferences] | +| Memory ceiling | [from technical-preferences] | + +### Approved Libraries / Addons +- [library] — approved for [purpose] + +### Forbidden APIs ([engine version]) +These APIs are deprecated or unverified for [engine + version]: +- `[api name]` — deprecated since [version] / unverified post-cutoff +- Source: `docs/engine-reference/[engine]/deprecated-apis.md` + +### Cross-Cutting Constraints +- [constraint that applies 
everywhere, regardless of layer] +``` + +--- + +## 6. Suggest Next Steps + +After writing the manifest: + +- If epics/stories don't exist yet: "Run `/create-epics layer: foundation` then `/create-stories [epic-slug]` — programmers + can now use this manifest when writing story implementation notes." +- If this is a regeneration (manifest already existed): "Updated. Recommend + notifying the team of changed rules — especially any new Forbidden entries." + +--- + +## Collaborative Protocol + +1. **Load silently** — read all inputs before presenting anything +2. **Show the summary first** — let the user see the scope before writing +3. **Ask before writing** — always confirm before creating or overwriting the manifest. On write: Verdict: **COMPLETE** — control manifest written. On decline: Verdict: **BLOCKED** — user declined write. +4. **Source every rule** — never add a rule that doesn't trace to an ADR, a + technical preference, or an engine reference doc +5. **No interpretation** — extract rules as stated in ADRs; do not paraphrase + in ways that change meaning diff --git a/.omc/skills/create-epics/SKILL.md b/.omc/skills/create-epics/SKILL.md new file mode 100644 index 0000000..662a04a --- /dev/null +++ b/.omc/skills/create-epics/SKILL.md @@ -0,0 +1,225 @@ +--- +name: create-epics +description: "Translate approved GDDs + architecture into epics — one epic per architectural module. Defines scope, governing ADRs, engine risk, and untraced requirements. Does NOT break into stories — run /create-stories [epic-slug] after each epic is created." +argument-hint: "[system-name | layer: foundation|core|feature|presentation | all] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task, AskUserQuestion +agent: technical-director +--- + +# Create Epics + +An epic is a named, bounded body of work that maps to one architectural module. +It defines **what** needs to be built and **who owns it architecturally**. 
It +does not prescribe implementation steps — that is the job of stories. + +**Run this skill once per layer** as you approach that layer in development. +Do not create Feature layer epics until Core is nearly complete — the design +will have changed. + +**Output:** `production/epics/[epic-slug]/EPIC.md` + `production/epics/index.md` + +**Next step after each epic:** `/create-stories [epic-slug]` + +**When to run:** After `/create-control-manifest` and `/architecture-review` pass. + +--- + +## 1. Parse Arguments + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +**Modes:** +- `/create-epics all` — process all systems in layer order +- `/create-epics layer: foundation` — Foundation layer only +- `/create-epics layer: core` — Core layer only +- `/create-epics layer: feature` — Feature layer only +- `/create-epics layer: presentation` — Presentation layer only +- `/create-epics [system-name]` — one specific system +- No argument — ask: "Which layer or system would you like to create epics for?" + +--- + +## 2. Load Inputs + +### Step 2a — Summary scan (fast) + +Grep all GDDs for their `## Summary` sections before reading anything fully: + +``` +Grep pattern="## Summary" glob="design/gdd/*.md" output_mode="content" -A 5 +``` + +For `layer:` or `[system-name]` modes: filter to only in-scope GDDs based on +the Summary quick-reference. Skip full-reading anything out of scope. + +### Step 2b — Full document load (in-scope systems only) + +Using the Step 2a grep results, identify which systems are in scope. Read full documents **only for in-scope systems** — do not read GDDs or ADRs for out-of-scope systems or layers. 
+ +Read for in-scope systems: + +- `design/gdd/systems-index.md` — authoritative system list, layers, priority +- In-scope GDDs only (Approved or Designed status, filtered by Step 2a results) +- `docs/architecture/architecture.md` — module ownership and API boundaries +- Accepted ADRs **whose domains cover in-scope systems only** — read the "GDD Requirements Addressed", "Decision", and "Engine Compatibility" sections; skip ADRs for unrelated domains +- `docs/architecture/control-manifest.md` — manifest version date from header +- `docs/architecture/tr-registry.yaml` — for tracing requirements to ADR coverage +- `docs/engine-reference/[engine]/VERSION.md` — engine name, version, risk levels + +Report: "Loaded [N] GDDs, [M] ADRs, engine: [name + version]." + +--- + +## 3. Processing Order + +Process in dependency-safe layer order: +1. **Foundation** (no dependencies) +2. **Core** (depends on Foundation) +3. **Feature** (depends on Core) +4. **Presentation** (depends on Feature + Core) + +Within each layer, use the order from `systems-index.md`. + +--- + +## 4. Define Each Epic + +For each system, map it to an architectural module from `architecture.md`. + +Check ADR coverage against the TR registry: +- **Traced requirements**: TR-IDs that have an Accepted ADR covering them +- **Untraced requirements**: TR-IDs with no ADR — warn before proceeding + +Present to user before writing anything: + +``` +## Epic: [System Name] + +**Layer**: [Foundation / Core / Feature / Presentation] +**GDD**: design/gdd/[filename].md +**Architecture Module**: [module name from architecture.md] +**Governing ADRs**: [ADR-NNNN, ADR-MMMM] +**Engine Risk**: [LOW / MEDIUM / HIGH — highest risk among governing ADRs] +**GDD Requirements Covered by ADRs**: [N / total] +**Untraced Requirements**: [list TR-IDs with no ADR, or "None"] +``` + +If there are untraced requirements: +> "⚠️ [N] requirements in [system] have no ADR. 
The epic can be created, but +> stories for these requirements will be marked Blocked until ADRs exist. +> Run `/architecture-decision` first, or proceed with placeholders." + +Ask: "Shall I create Epic: [name]?" +Options: "Yes, create it", "Skip", "Pause — I need to write ADRs first" + +--- + +## 4b. Producer Epic Structure Gate + +**Review mode check** — apply before spawning PR-EPIC: +- `solo` → skip. Note: "PR-EPIC skipped — Solo mode." Proceed to Step 5 (write epic files). +- `lean` → skip (not a PHASE-GATE). Note: "PR-EPIC skipped — Lean mode." Proceed to Step 5 (write epic files). +- `full` → spawn as normal. + +After all epics for the current layer are defined (Step 4 completed for all in-scope systems), and before writing any files, spawn `producer` via Task using gate **PR-EPIC** (`.claude/docs/director-gates.md`). + +Pass: the full epic structure summary (all epics, their scope summaries, governing ADR counts), the layer being processed, milestone timeline and team capacity. + +Present the producer's assessment. If UNREALISTIC, offer to revise epic boundaries (split overscoped or merge underscoped epics) before writing. If CONCERNS, surface them and let the user decide. Do not write epic files until the producer gate resolves. + +--- + +## 5. Write Epic Files + +After approval, ask: "May I write the epic file to `production/epics/[epic-slug]/EPIC.md`?" 
+ +After user confirms, write: + +### `production/epics/[epic-slug]/EPIC.md` + +```markdown +# Epic: [System Name] + +> **Layer**: [Foundation / Core / Feature / Presentation] +> **GDD**: design/gdd/[filename].md +> **Architecture Module**: [module name] +> **Status**: Ready +> **Stories**: Not yet created — run `/create-stories [epic-slug]` + +## Overview + +[1 paragraph describing what this epic implements, derived from the GDD Overview +and the architecture module's stated responsibilities] + +## Governing ADRs + +| ADR | Decision Summary | Engine Risk | +|-----|-----------------|-------------| +| ADR-NNNN: [title] | [1-line summary] | LOW/MEDIUM/HIGH | + +## GDD Requirements + +| TR-ID | Requirement | ADR Coverage | +|-------|-------------|--------------| +| TR-[system]-001 | [requirement text from registry] | ADR-NNNN ✅ | +| TR-[system]-002 | [requirement text] | ❌ No ADR | + +## Definition of Done + +This epic is complete when: +- All stories are implemented, reviewed, and closed via `/story-done` +- All acceptance criteria from `design/gdd/[filename].md` are verified +- All Logic and Integration stories have passing test files in `tests/` +- All Visual/Feel and UI stories have evidence docs with sign-off in `production/qa/evidence/` + +## Next Step + +Run `/create-stories [epic-slug]` to break this epic into implementable stories. +``` + +### Update `production/epics/index.md` + +Create or update the master index: + +```markdown +# Epics Index + +Last Updated: [date] +Engine: [name + version] + +| Epic | Layer | System | GDD | Stories | Status | +|------|-------|--------|-----|---------|--------| +| [name] | Foundation | [system] | [file] | Not yet created | Ready | +``` + +--- + +## 6. Gate-Check Reminder + +After writing all epics for the requested scope: + +- **Foundation + Core complete**: These are required for the Pre-Production → + Production gate. Run `/gate-check production` to check readiness. +- **Reminder**: Epics define scope. 
Stories define implementation steps. Run + `/create-stories [epic-slug]` for each epic before developers can pick up work. + +--- + +## Collaborative Protocol + +1. **One epic at a time** — present each epic definition before asking to create it +2. **Warn on gaps** — flag untraced requirements before proceeding +3. **Ask before writing** — per-epic approval before writing any file +4. **No invention** — all content comes from GDDs, ADRs, and architecture docs +5. **Never create stories** — this skill stops at the epic level + +After all requested epics are processed: + +- **Verdict: COMPLETE** — [N] epic(s) written. Run `/create-stories [epic-slug]` per epic. +- **Verdict: BLOCKED** — user declined all epics, or no eligible systems found. diff --git a/.omc/skills/create-stories/SKILL.md b/.omc/skills/create-stories/SKILL.md new file mode 100644 index 0000000..ba39446 --- /dev/null +++ b/.omc/skills/create-stories/SKILL.md @@ -0,0 +1,313 @@ +--- +name: create-stories +description: "Break a single epic into implementable story files. Reads the epic, its GDD, governing ADRs, and control manifest. Each story embeds its GDD requirement TR-ID, ADR guidance, acceptance criteria, story type, and test evidence path. Run after /create-epics for each epic." +argument-hint: "[epic-slug | epic-path] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task, AskUserQuestion +agent: lead-programmer +--- + +# Create Stories + +A story is a single implementable behaviour — small enough to complete in one +focused session, self-contained, and fully traceable to a GDD requirement and +an ADR decision. Stories are what developers pick up. Epics are what architects +define. + +**Run this skill per epic**, not per layer. Run it for Foundation epics first, +then Core, and so on — matching the dependency order. 
+ +**Output:** `production/epics/[epic-slug]/story-NNN-[slug].md` files + +**Previous step:** `/create-epics [system]` +**Next step after stories exist:** `/story-readiness [story-path]` then `/dev-story [story-path]` + +--- + +## 1. Parse Argument + +Extract `--review [full|lean|solo]` if present and store as the review mode +override for this run. If not provided, read `production/review-mode.txt` +(default `full` if missing). This resolved mode applies to all gate spawns +in this skill — apply the check pattern from `.claude/docs/director-gates.md` +before every gate invocation. + +- `/create-stories [epic-slug]` — e.g. `/create-stories combat` +- `/create-stories production/epics/combat/EPIC.md` — full path also accepted +- No argument — ask: "Which epic would you like to break into stories?" + Glob `production/epics/*/EPIC.md` and list available epics with their status. + +--- + +## 2. Load Everything for This Epic + +Read in full: + +- `production/epics/[epic-slug]/EPIC.md` — epic overview, governing ADRs, GDD requirements table +- The epic's GDD (`design/gdd/[filename].md`) — read all 8 sections, especially Acceptance Criteria, Formulas, and Edge Cases +- All governing ADRs listed in the epic — read the Decision, Implementation Guidelines, Engine Compatibility, and Engine Notes sections +- `docs/architecture/control-manifest.md` — extract rules for this epic's layer; note the Manifest Version date from the header +- `docs/architecture/tr-registry.yaml` — load all TR-IDs for this system + +**ADR existence validation**: After reading the governing ADRs list from the epic, confirm each ADR file exists on disk. If any ADR file cannot be found, **stop immediately** before decomposing any story: + +> "Epic references [ADR-NNNN: title] but `docs/architecture/[adr-file].md` was not found. +> Check the filename in the epic's Governing ADRs list, or run `/architecture-decision` +> to create it. Cannot create stories until all referenced ADR files are present." 
+ +Do not proceed to Step 3 until all referenced ADR files are confirmed present. + +Report: "Loaded epic [name], GDD [filename], [N] governing ADRs (all confirmed present), control manifest v[date]." + +--- + +## 3. Classify Stories by Type + +**Story Type Classification** — assign each story a type based on its acceptance criteria: + +| Story Type | Assign when criteria reference... | +|---|---| +| **Logic** | Formulas, numerical thresholds, state transitions, AI decisions, calculations | +| **Integration** | Two or more systems interacting, signals crossing boundaries, save/load round-trips | +| **Visual/Feel** | Animation behaviour, VFX, "feels responsive", timing, screen shake, audio sync | +| **UI** | Menus, HUD elements, buttons, screens, dialogue boxes, tooltips | +| **Config/Data** | Balance tuning values, data file changes only — no new code logic | + +Mixed stories: assign the type that carries the highest implementation risk. +The type determines what test evidence is required before `/story-done` can close the story. + +--- + +## 4. Decompose the GDD into Stories + +For each GDD acceptance criterion: + +1. Group related criteria that require the same core implementation +2. Each group = one story +3. Order stories: foundational behaviour first, edge cases last, UI last + +**Story sizing rule:** one story = one focused session (~2-4 hours). If a +group of criteria would take longer, split into two stories. + +For each story, determine: +- **GDD requirement**: which acceptance criterion(ia) does this satisfy? +- **TR-ID**: look up in `tr-registry.yaml`. Use the stable ID. If no match, use `TR-[system]-???` and warn. +- **Governing ADR**: which ADR governs how to implement this? 
+ - `Status: Accepted` → embed normally + - `Status: Proposed` → set story `Status: Blocked` with note: "BLOCKED: ADR-NNNN is Proposed — run `/architecture-decision` to advance it" +- **Story Type**: from Step 3 classification +- **Engine risk**: from the ADR's Knowledge Risk field + +--- + +## 4b. QA Lead Story Readiness Gate + +**Review mode check** — apply before spawning QL-STORY-READY: +- `solo` → skip. Note: "QL-STORY-READY skipped — Solo mode." Proceed to Step 5 (present stories for review). +- `lean` → skip (not a PHASE-GATE). Note: "QL-STORY-READY skipped — Lean mode." Proceed to Step 5 (present stories for review). +- `full` → spawn as normal. + +After decomposing all stories (Step 4 complete) but before presenting them for write approval, spawn `qa-lead` via Task using gate **QL-STORY-READY** (`.claude/docs/director-gates.md`). + +Pass: the full story list with acceptance criteria, story types, and TR-IDs; the epic's GDD acceptance criteria for reference. + +Present the QA lead's assessment. For each story flagged as GAPS or INADEQUATE, revise the acceptance criteria before proceeding — stories with untestable criteria cannot be implemented correctly. Once all stories reach ADEQUATE, proceed. + +**After ADEQUATE**: for every Logic and Integration story, ask the qa-lead to produce concrete test case specifications — one per acceptance criterion — in this format: + +``` +Test: [criterion text] + Given: [precondition] + When: [action] + Then: [expected result / assertion] + Edge cases: [boundary values or failure states to test] +``` + +For Visual/Feel and UI stories, produce manual verification steps instead: +``` +Manual check: [criterion text] + Setup: [how to reach the state] + Verify: [what to look for] + Pass condition: [unambiguous pass description] +``` + +These test case specs are embedded directly into each story's `## QA Test Cases` section. The developer implements against these cases. 
The programmer does not write tests from scratch — QA has already defined what "done" looks like. + +--- + +## 5. Present Stories for Review + +Before writing any files, present the full story list: + +``` +## Stories for Epic: [name] + +Story 001: [title] — Logic — ADR-NNNN + Covers: TR-[system]-001 ([1-line summary of requirement]) + Test required: tests/unit/[system]/[slug]_test.[ext] + +Story 002: [title] — Integration — ADR-MMMM + Covers: TR-[system]-002, TR-[system]-003 + Test required: tests/integration/[system]/[slug]_test.[ext] + +Story 003: [title] — Visual/Feel — ADR-NNNN + Covers: TR-[system]-004 + Evidence required: production/qa/evidence/[slug]-evidence.md + +[N stories total: N Logic, N Integration, N Visual/Feel, N UI, N Config/Data] +``` + +Use `AskUserQuestion`: +- Prompt: "May I write these [N] stories to `production/epics/[epic-slug]/`?" +- Options: `[A] Yes — write all [N] stories` / `[B] Not yet — I want to review or adjust first` + +--- + +## 6. Write Story Files + +For each story, write `production/epics/[epic-slug]/story-[NNN]-[slug].md`: + +```markdown +# Story [NNN]: [title] + +> **Epic**: [epic name] +> **Status**: Ready +> **Layer**: [Foundation / Core / Feature / Presentation] +> **Type**: [Logic | Integration | Visual/Feel | UI | Config/Data] +> **Manifest Version**: [date from control-manifest.md header] + +## Context + +**GDD**: `design/gdd/[filename].md` +**Requirement**: `TR-[system]-NNN` +*(Requirement text lives in `docs/architecture/tr-registry.yaml` — read fresh at review time)* + +**ADR Governing Implementation**: [ADR-NNNN: title] +**ADR Decision Summary**: [1-2 sentence summary of what the ADR decided] + +**Engine**: [name + version] | **Risk**: [LOW / MEDIUM / HIGH] +**Engine Notes**: [from ADR Engine Compatibility section — post-cutoff APIs, verification required] + +**Control Manifest Rules (this layer)**: +- Required: [relevant required pattern] +- Forbidden: [relevant forbidden pattern] +- Guardrail: [relevant 
performance guardrail] + +--- + +## Acceptance Criteria + +*From GDD `design/gdd/[filename].md`, scoped to this story:* + +- [ ] [criterion 1 — directly from GDD] +- [ ] [criterion 2] +- [ ] [performance criterion if applicable] + +--- + +## Implementation Notes + +*Derived from ADR-NNNN Implementation Guidelines:* + +[Specific, actionable guidance from the ADR. Do not paraphrase in ways that +change meaning. This is what the programmer reads instead of the ADR.] + +--- + +## Out of Scope + +*Handled by neighbouring stories — do not implement here:* + +- [Story NNN+1]: [what it handles] + +--- + +## QA Test Cases + +*Written by qa-lead at story creation. The developer implements against these — do not invent new test cases during implementation.* + +**[For Logic / Integration stories — automated test specs]:** + +- **AC-1**: [criterion text] + - Given: [precondition] + - When: [action] + - Then: [assertion] + - Edge cases: [boundary values / failure states] + +**[For Visual/Feel / UI stories — manual verification steps]:** + +- **AC-1**: [criterion text] + - Setup: [how to reach the state] + - Verify: [what to look for] + - Pass condition: [unambiguous pass description] + +--- + +## Test Evidence + +**Story Type**: [type] +**Required evidence**: +- Logic: `tests/unit/[system]/[story-slug]_test.[ext]` — must exist and pass +- Integration: `tests/integration/[system]/[story-slug]_test.[ext]` OR playtest doc +- Visual/Feel: `production/qa/evidence/[story-slug]-evidence.md` + sign-off +- UI: `production/qa/evidence/[story-slug]-evidence.md` or interaction test +- Config/Data: smoke check pass (`production/qa/smoke-*.md`) + +**Status**: [ ] Not yet created + +--- + +## Dependencies + +- Depends on: [Story NNN-1 must be DONE, or "None"] +- Unlocks: [Story NNN+1, or "None"] +``` + +### Also update `production/epics/[epic-slug]/EPIC.md` + +Replace the "Stories: Not yet created" line with a populated table: + +```markdown +## Stories + +| # | Story | Type | Status | ADR | 
+|---|-------|------|--------|-----| +| 001 | [title] | Logic | Ready | ADR-NNNN | +| 002 | [title] | Integration | Ready | ADR-MMMM | +``` + +--- + +## 7. After Writing + +Use `AskUserQuestion` to close with context-aware next steps: + +Check: +- Are there other epics in `production/epics/` without stories yet? List them. +- Is this the last epic? If so, include `/sprint-plan` as an option. + +Widget: +- Prompt: "[N] stories written to `production/epics/[epic-slug]/`. What next?" +- Options (include all that apply): + - `[A] Start implementing — run /story-readiness [first-story-path]` (Recommended) + - `[B] Create stories for [next-epic-slug] — run /create-stories [slug]` (only if other epics have no stories yet) + - `[C] Plan the sprint — run /sprint-plan` (only if all epics have stories) + - `[D] Stop here for this session` + +Note in output: "Work through stories in order — each story's `Depends on:` field tells you what must be DONE before you can start it." + +--- + +## Collaborative Protocol + +1. **Read before presenting** — load all inputs silently before showing the story list +2. **Ask once** — present all stories for the epic in one summary, not one at a time +3. **Warn on blocked stories** — flag any story with a Proposed ADR before writing +4. **Ask before writing** — get approval for the full story set before writing files +5. **No invention** — acceptance criteria come from GDDs, implementation notes from ADRs, rules from the manifest +6. **Never start implementation** — this skill stops at the story file level + +After writing (or declining): + +- **Verdict: COMPLETE** — [N] stories written to `production/epics/[epic-slug]/`. Run `/story-readiness` → `/dev-story` to begin implementation. +- **Verdict: BLOCKED** — user declined. No story files written. 
diff --git a/.omc/skills/day-one-patch/SKILL.md b/.omc/skills/day-one-patch/SKILL.md new file mode 100644 index 0000000..770d372 --- /dev/null +++ b/.omc/skills/day-one-patch/SKILL.md @@ -0,0 +1,218 @@ +--- +name: day-one-patch +description: "Prepare a day-one patch for a game launch. Scopes, prioritises, implements, and QA-gates a focused patch addressing known issues discovered after gold master but before or immediately after public launch. Treats the patch as a mini-sprint with its own QA gate and rollback plan." +argument-hint: "[scope: known-bugs | cert-feedback | all]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion +--- + +# Day-One Patch + +Every shipped game has a day-one patch. Planning it before launch day prevents +chaos. This skill scopes the patch to only what is safe and necessary, gates it +through a lightweight QA pass, and ensures a rollback plan exists before anything +ships. It is a mini-sprint — not a hotfix, not a full sprint. 
+ +**When to run:** +- After the gold master build is locked (cert approved or launch candidate tagged) +- When known bugs exist that are too risky to address in the gold master +- When cert feedback requires minor fixes post-submission +- When a pre-launch playtest surfaces must-fix issues after the release gate passed + +**Day-one patch scope rules:** +- Only P1/P2 bugs that are SAFE to fix quickly +- No new features — this is fix-only +- No refactoring — minimum viable change +- Any fix that requires more than 4 hours of dev time belongs in patch 1.1, not day-one + +**Output:** `production/releases/day-one-patch-[version].md` + +--- + +## Phase 1: Load Release Context + +Read: +- `production/stage.txt` — confirm project is in Release stage +- The most recent file in `production/gate-checks/` — read the release gate verdict +- `production/qa/bugs/*.md` — load all bugs with Status: Open or Fixed — Pending Verification +- `production/sprints/` most recent — understand what shipped +- `production/security/security-audit-*.md` most recent — check for any open security items + +If `production/stage.txt` is not `Release` or `Polish`: +> "Day-one patch prep is for Release-stage projects. Current stage: [stage]. This skill is not appropriate until you are approaching launch." + +--- + +## Phase 2: Scope the Patch + +### Step 2a — Classify open bugs for patch inclusion + +For each open bug, evaluate: + +| Criterion | Include in day-one? 
| +|-----------|-------------------| +| S1 or S2 severity | Yes — must include if safe to fix | +| P1 priority | Yes | +| Fix estimated < 4 hours | Yes | +| Fix requires architecture change | No — defer to 1.1 | +| Fix introduces new code paths | No — too risky | +| Fix is data/config only (no code change) | Yes — very low risk | +| Cert feedback requirement | Yes — required for platform approval | +| S3/S4 severity | Only if trivial config fix; otherwise defer | + +### Step 2b — Present patch scope to user + +Use `AskUserQuestion`: +- Prompt: "Based on open bugs and cert feedback, here is the proposed day-one patch scope. Does this look right?" +- Show: table of included bugs (ID, severity, description, estimated effort) +- Show: table of deferred bugs (ID, severity, reason deferred) +- Options: `[A] Approve this scope` / `[B] Adjust — I want to add or remove items` / `[C] No day-one patch needed` + +If [C]: output "No day-one patch required. Proceed to `/launch-checklist`." Stop. + +### Step 2c — Check total scope + +Sum estimated effort. If total exceeds 1 day of work: +> "⚠️ Patch scope is [N hours] — this exceeds a safe day-one window. Consider deferring lower-priority items to patch 1.1. A bloated day-one patch introduces more risk than it removes." + +Use `AskUserQuestion` to confirm proceeding or reduce scope. + +--- + +## Phase 3: Rollback Plan + +Before any code is written, define the rollback procedure. This is non-negotiable. + +Spawn `release-manager` via Task. Ask them to produce a rollback plan covering: +- How to revert to the gold master build on each target platform +- Platform-specific rollback constraints (some platforms cannot roll back cert builds) +- Who is responsible for triggering the rollback +- What player communication is required if a rollback occurs + +Present the rollback plan. Ask: "May I write this rollback plan to `production/releases/rollback-plan-[version].md`?" + +Do not proceed to Phase 4 until the rollback plan is written. 
+ +--- + +## Phase 4: Implement Fixes + +For each bug in the approved scope, spawn a focused implementation loop: + +1. Spawn `lead-programmer` via Task with: + - The bug report (exact reproduction steps and root cause if known) + - The constraint: minimum viable fix only, no cleanup + - The affected files (from bug report Technical Context section) + +2. The lead-programmer implements and runs targeted tests. + +3. Spawn `qa-tester` via Task to verify: does the bug reproduce after the fix? + +For config/data-only fixes: make the change directly (no programmer agent needed). Confirm the value changed and re-run any relevant smoke test. + +--- + +## Phase 5: Patch QA Gate + +This is a lightweight QA pass — not a full `/team-qa`. The patch is already QA-approved from the release gate; we are only re-verifying the changed areas. + +Spawn `qa-lead` via Task with: +- List of all changed files +- List of bugs fixed (with verification status from Phase 4) +- The smoke check scope for the affected systems + +Ask qa-lead to determine: **Is a targeted smoke check sufficient, or do any fixes touch systems that require a broader regression?** + +Run the required QA scope: +- **Targeted smoke check** — run `/smoke-check [affected-systems]` +- **Broader regression** — run targeted tests in `tests/unit/` and `tests/integration/` for affected systems + +QA verdict must be PASS or PASS WITH WARNINGS before proceeding. If FAIL: scope the failing fix out of the day-one patch and defer to 1.1. 
+ +--- + +## Phase 6: Generate Patch Record + +```markdown +# Day-One Patch: [Game Name] v[version] + +**Date prepared**: [date] +**Target release**: [launch date or "day of launch"] +**Base build**: [gold master tag or commit] +**Patch build**: [patch tag or commit] + +--- + +## Patch Notes (Internal) + +### Bugs Fixed +| BUG-ID | Severity | Description | Fix summary | +|--------|----------|-------------|-------------| +| BUG-NNN | S[1-4] | [description] | [one-line fix] | + +### Deferred to 1.1 +| BUG-ID | Severity | Description | Reason deferred | +|--------|----------|-------------|-----------------| +| BUG-NNN | S[1-4] | [description] | [reason] | + +--- + +## QA Sign-Off + +**QA scope**: [Targeted smoke / Broader regression] +**Verdict**: [PASS / PASS WITH WARNINGS] +**QA lead**: qa-lead agent +**Date**: [date] +**Warnings (if any)**: [list or "None"] + +--- + +## Rollback Plan + +See: `production/releases/rollback-plan-[version].md` + +**Trigger condition**: If [N] or more S1 bugs are reported within [X] hours of launch, execute rollback. +**Rollback owner**: [user / producer] + +--- + +## Approvals Required Before Deploy + +- [ ] lead-programmer: all fixes reviewed +- [ ] qa-lead: QA gate PASS confirmed +- [ ] producer: deployment timing approved +- [ ] release-manager: platform submission confirmed + +--- + +## Player-Facing Patch Notes + +[Draft for community-manager to review before publishing] + +[list player-facing changes in plain language] +``` + +Ask: "May I write this patch record to `production/releases/day-one-patch-[version].md`?" + +--- + +## Phase 7: Next Steps + +After the patch record is written: + +1. Run `/patch-notes` to generate the player-facing version of the patch notes +2. Run `/bug-report verify [BUG-ID]` for each fixed bug after the patch is live +3. Run `/bug-report close [BUG-ID]` for each verified fix +4. 
Schedule a post-launch review 48–72 hours after launch using `/retrospective launch` + +**If any S1 bugs remain open after the patch:** +> "⚠️ S1 bugs remain open and were not patched. These are accepted risks. Document them in the rollback plan trigger conditions — if they occur at scale, rollback may be preferable to a follow-up patch." + +--- + +## Collaborative Protocol + +- **Scope discipline is everything** — resist scope creep; every addition increases risk +- **Rollback plan first, always** — a patch without a rollback plan is irresponsible +- **Deferred is not forgotten** — every deferred bug gets a 1.1 ticket automatically +- **Player communication is part of the patch** — `/patch-notes` is a required output, not optional diff --git a/.omc/skills/design-review/SKILL.md b/.omc/skills/design-review/SKILL.md new file mode 100644 index 0000000..e12bbe9 --- /dev/null +++ b/.omc/skills/design-review/SKILL.md @@ -0,0 +1,257 @@ +--- +name: design-review +description: "Reviews a game design document for completeness, internal consistency, implementability, and adherence to project design standards. Run this before handing a design document to programmers." +argument-hint: "[path-to-design-doc] [--depth full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion +--- + +## Phase 0: Parse Arguments + +Extract `--depth [full|lean|solo]` if present. Default is `full` when no flag is given. + +**Note**: `--depth` controls the *analysis depth* of this skill (how many specialist agents are spawned). It is independent of the global review mode in `production/review-mode.txt`, which controls director gate spawning. These are two different concepts — `--depth` is about how thoroughly *this* skill analyses the document. 
+ +- **`full`**: Complete review — all phases + specialist agent delegation (Phase 3b) +- **`lean`**: All phases, no specialist agents — faster, single-session analysis +- **`solo`**: Phases 1-4 only, no delegation, no Phase 5 next-step prompt — use when called from within another skill + +--- + +## Phase 1: Load Documents + +Read the target design document in full. Read CLAUDE.md to understand project context and standards. Read related design documents referenced or implied by the target doc (check `design/gdd/` for related systems). + +**Dependency graph validation:** For every system listed in the Dependencies section, use Glob to check whether its GDD file exists in `design/gdd/`. Flag any that don't exist yet — these are broken references that downstream authors will hit. + +**Lore/narrative alignment:** If `design/gdd/game-concept.md` or any file in `design/narrative/` exists, read it. Note any mechanical choices in this GDD that contradict established world rules, tone, or design pillars. Pass this context to `game-designer` in Phase 3b. + +**Prior review check:** Check whether `design/gdd/reviews/[doc-name]-review-log.md` exists. If it does, read the most recent entry — note what verdict was given and what blocking items were listed. This session is a re-review; track whether prior items were addressed. 
+ +--- + +## Phase 2: Completeness Check + +Evaluate against the Design Document Standard checklist: + +- [ ] Has Overview section (one-paragraph summary) +- [ ] Has Player Fantasy section (intended feeling) +- [ ] Has Detailed Rules section (unambiguous mechanics) +- [ ] Has Formulas section (all math defined with variables) +- [ ] Has Edge Cases section (unusual situations handled) +- [ ] Has Dependencies section (other systems listed) +- [ ] Has Tuning Knobs section (configurable values identified) +- [ ] Has Acceptance Criteria section (testable success conditions) + +--- + +## Phase 3: Consistency and Implementability + +**Internal consistency:** +- Do the formulas produce values that match the described behaviour? +- Do edge cases contradict the main rules? +- Are dependencies bidirectional (does the other system know about this one)? + +**Implementability:** +- Are the rules precise enough for a programmer to implement without guessing? +- Are there any "hand-wave" sections where details are missing? +- Are performance implications considered? + +**Cross-system consistency:** +- Does this conflict with any existing mechanic? +- Does this create unintended interactions with other systems? +- Is this consistent with the game's established tone and pillars? + +--- + +## Phase 3b: Adversarial Specialist Review (full mode only) + +**Skip this phase in `lean` or `solo` mode.** + +**This phase is MANDATORY in full mode.** Do not skip it. + +**Before spawning any agents**, print this notice: +> "Full review: spawning specialist agents in parallel. This typically takes 8–15 minutes. Use `--review lean` for faster single-session analysis." + +### Step 1 — Identify all domains the GDD touches + +Read the GDD and identify every domain present. A GDD can touch multiple domains simultaneously — be thorough. Common signals: + +| If the GDD contains... 
| Spawn these agents | +|------------------------|-------------------| +| Costs, prices, drops, rewards, economy | `economy-designer` | +| Combat stats, damage, health, DPS | `game-designer`, `systems-designer` | +| AI behaviour, pathfinding, targeting | `ai-programmer` | +| Level layout, spawning, wave structure | `level-designer` | +| Player progression, XP, unlocks | `economy-designer`, `game-designer` | +| UI, HUD, menus, player-facing displays | `ux-designer`, `ui-programmer` | +| Dialogue, quests, story, lore | `narrative-director` | +| Animation, feel, timing, juice | `gameplay-programmer` | +| Multiplayer, sync, replication | `network-programmer` | +| Audio cues, music triggers | `audio-director` | +| Performance, draw calls, memory | `performance-analyst` | +| Engine-specific patterns or APIs | Primary engine specialist (from `.claude/docs/technical-preferences.md`) | +| Acceptance criteria, test coverage | `qa-lead` | +| Data schema, resource structure | `systems-designer` | +| Any gameplay system | `game-designer` (always) | + +**Always spawn `game-designer` and `systems-designer` as a baseline minimum.** Every GDD touches their domain. + +### Step 2 — Spawn all relevant specialists in parallel + +**CRITICAL: Task in this skill spawns a SUBAGENT — a separate independent Claude session +with its own context window. It is NOT task tracking. Do NOT simulate specialist +perspectives internally. Do NOT reason through domain views yourself. You MUST issue +actual Task calls. A simulated review is not a specialist review.** + +Issue all Task calls simultaneously. Do NOT spawn one at a time. + +**Prompt each specialist adversarially:** +> "Here is the GDD for [system] and the main review's structural findings so far. +> Your job is NOT to validate this design — your job is to find problems. +> Challenge the design choices from your domain expertise. What is wrong, +> underspecified, likely to cause problems, or missing entirely? +> Be specific and critical. 
Disagreement with the main review is welcome." + +**Additional instructions per agent type:** + +- **`game-designer`**: Anchor your review to the Player Fantasy stated in Section B of this GDD. Does this design actually deliver that fantasy? Would a player feel the intended experience? Flag any rules that serve implementability but undermine the stated feeling. + +- **`systems-designer`**: For every formula in the GDD, plug in boundary values (minimum and maximum plausible inputs). Report whether any outputs go degenerate — negative values, division by zero, infinity, or nonsensical results at the extremes. + +- **`qa-lead`**: Review every acceptance criterion. Flag any that are not independently testable — phrases like "feels balanced", "works correctly", "performs well" are not ACs. Suggest concrete rewrites for any that fail this test. + +### Step 3 — Senior lead review + +After all specialists respond, spawn `creative-director` as the **senior reviewer**: +- Provide: the GDD, all specialist findings, any disagreements between them +- Ask: "Synthesise these findings. What are the most important issues? Do you agree with the specialists? What is your overall verdict on this design?" +- The creative-director's synthesis becomes the **final verdict** in Phase 4. + +### Step 4 — Surface disagreements + +If specialists disagree with each other or with the creative-director, do NOT silently pick one view. Present the disagreement explicitly in Phase 4 so the user can adjudicate. + +Mark every finding with its source: `[game-designer]`, `[economy-designer]`, `[creative-director]` etc. 
+ +--- + +## Phase 4: Output Review + +``` +## Design Review: [Document Title] +Specialists consulted: [list agents spawned] +Re-review: [Yes — prior verdict was X on YYYY-MM-DD / No — first review] + +### Completeness: [X/8 sections present] +[List missing sections] + +### Dependency Graph +[List each declared dependency and whether its GDD file exists on disk] +- ✓ enemy-definition-data.md — exists +- ✗ loot-system.md — NOT FOUND (file does not exist yet) + +### Required Before Implementation +[Numbered list — blocking issues only. Each item tagged with source agent.] + +### Recommended Revisions +[Numbered list — important but not blocking. Source-tagged.] + +### Specialist Disagreements +[Any cases where agents disagreed with each other or with the main review. +Present both sides — do not silently resolve.] + +### Nice-to-Have +[Minor improvements, low priority.] + +### Senior Verdict [creative-director] +[Creative director's synthesis and overall assessment.] + +### Scope Signal +Estimate implementation scope based on: dependency count, formula count, +systems touched, and whether new ADRs are required. +- **S** — single system, no formulas, no new ADRs, <3 dependencies +- **M** — moderate complexity, 1-2 formulas, 3-6 dependencies +- **L** — multi-system integration, 3+ formulas, may require new ADR +- **XL** — cross-cutting concern, 5+ dependencies, multiple new ADRs likely +Label clearly: "Rough scope signal: M (producer should verify before sprint planning)" + +### Verdict: [APPROVED / NEEDS REVISION / MAJOR REVISION NEEDED] +``` + +This skill is read-only — no files are written during Phase 4. + +--- + +## Phase 5: Next Steps + +Use `AskUserQuestion` for ALL closing interactions. Never plain text. + +**First widget — what to do next:** + +If APPROVED (first-pass, no revision needed), proceed directly to the systems-index widget, review-log widget, then the final closing widget. 
Do not show a separate "what to do" widget — the final closing widget covers next steps. + +If NEEDS REVISION or MAJOR REVISION NEEDED, options: +- `[A] Revise the GDD now — address blocking items together` +- `[B] Stop here — revise in a separate session` +- `[C] Accept as-is and move on (only if all items are advisory)` + +**If user selects [A] — Revise now:** + +Work through all blocking items, asking for design decisions only where you cannot resolve the issue from the GDD and existing docs alone. Group all design-decision questions into a single multi-tab `AskUserQuestion` before making any edits — do not interrupt mid-revision for each blocker individually. + +After all revisions are complete, show a summary table (blocker → fix applied) and use `AskUserQuestion` for a **post-revision closing widget**: + +- Prompt: "Revisions complete — [N] blockers resolved. What next?" +- Note current context usage: if context is above ~50%, add: "(Recommended: /clear before re-review — this session has used X% context. A full re-review runs 5 agents and needs clean context.)" +- Options: + - `[A] Re-review in a new session — run /design-review [doc-path] after /clear` + - `[B] Accept revisions and mark Approved — update systems index, skip re-review` + - `[C] Move to next system — /design-system [next-system] (#N in design order)` + - `[D] Stop here` + +Never end the revision flow with plain text. Always close with this widget. + +**Second widget — systems index update (always show this separately):** + +Use a second `AskUserQuestion`: +- Prompt: "May I update `design/gdd/systems-index.md` to mark [system] as [In Review / Approved]?" +- Options: `[A] Yes — update it` / `[B] No — leave it as-is` + +**Third widget — review log (always offer):** + +Use a third `AskUserQuestion`: +- Prompt: "May I append this review summary to `design/gdd/reviews/[doc-name]-review-log.md`? This creates a revision history so future re-reviews can track what changed." 
+- Options: `[A] Yes — append to review log` / `[B] No — skip` + +If yes, append an entry in this format: +``` +## Review — [YYYY-MM-DD] — Verdict: [APPROVED / NEEDS REVISION / MAJOR REVISION NEEDED] +Scope signal: [S/M/L/XL] +Specialists: [list] +Blocking items: [count] | Recommended: [count] +Summary: [2-3 sentence summary of key findings from creative-director verdict] +Prior verdict resolved: [Yes / No / First review] +``` + +--- + +**Final closing widget — always show after all file writes complete:** + +Once the systems-index and review-log widgets are answered, check project state and show one final `AskUserQuestion`: + +Before building options, read: +- `design/gdd/systems-index.md` — find any system with Status: In Review or NEEDS REVISION (other than the one just reviewed) +- Count `.md` files in `design/gdd/` (excluding game-concept.md, systems-index.md) to determine if `/review-all-gdds` is worth offering (≥2 GDDs) +- Find the next system with Status: Not Started in design order + +Build the option list dynamically — only include options that are genuinely next: +- `[_] Run /design-review [other-gdd-path] — [system name] is still [In Review / NEEDS REVISION]` (include if another GDD needs review) +- `[_] Run /consistency-check — verify this GDD's values don't conflict with existing GDDs` (always include if ≥1 other GDD exists) +- `[_] Run /review-all-gdds — holistic design-theory review across all designed systems` (include if ≥2 GDDs exist) +- `[_] Run /design-system [next-system] — next in design order` (always include, name the actual system) +- `[_] Stop here` + +Assign letters A, B, C… only to included options. Mark the most pipeline-advancing option as `(recommended)`. + +Never end the skill with plain text after file writes. Always close with this widget. 
diff --git a/.omc/skills/design-system/SKILL.md b/.omc/skills/design-system/SKILL.md new file mode 100644 index 0000000..9cda7c6 --- /dev/null +++ b/.omc/skills/design-system/SKILL.md @@ -0,0 +1,841 @@ +--- +name: design-system +description: "Guided, section-by-section GDD authoring for a single game system. Gathers context from existing docs, walks through each required section collaboratively, cross-references dependencies, and writes incrementally to file." +argument-hint: "<system-name> [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion, TodoWrite +--- + +When this skill is invoked: + +## 1. Parse Arguments & Validate + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +A system name or retrofit path is **required**. If missing: + +1. Check if `design/gdd/systems-index.md` exists. +2. If it exists: read it, find the highest-priority system with status "Not Started" or equivalent, and use `AskUserQuestion`: + - Prompt: "The next system in your design order is **[system-name]** ([priority] | [layer]). Start designing it?" + - Options: `[A] Yes — design [system-name]` / `[B] Pick a different system` / `[C] Stop here` + - If [A]: proceed with that system name. If [B]: ask which system to design (plain text). If [C]: exit. +3. If no systems index exists, fail with: + > "Usage: `/design-system <system-name>` — e.g., `/design-system movement` + > Or to fill gaps in an existing GDD: `/design-system retrofit design/gdd/[system-name].md` + > No systems index found. Run `/map-systems` first to map your systems and get the design order." 
+ +**Detect retrofit mode:** +If the argument starts with `retrofit` or the argument is a file path to an +existing `.md` file in `design/gdd/`, enter **retrofit mode**: + +1. Read the existing GDD file. +2. Identify which of the 8 required sections are present (scan for section headings). + Required sections: Overview, Player Fantasy, Detailed Design/Rules, Formulas, + Edge Cases, Dependencies, Tuning Knobs, Acceptance Criteria. +3. Identify which sections contain only placeholder text (`[To be designed]` or + equivalent — blank, a single line, or obviously incomplete). +4. Present to the user before doing anything: + ``` + ## Retrofit: [System Name] + File: design/gdd/[filename].md + + Sections already written (will not be touched): + ✓ [section name] + ✓ [section name] + + Missing or incomplete sections (will be authored): + ✗ [section name] — missing + ✗ [section name] — placeholder only + ``` +5. Ask: "Shall I fill the [N] missing sections? I will not modify any existing content." +6. If yes: proceed to **Phase 2 (Gather Context)** as normal, but in **Phase 3** + skip creating the skeleton (file already exists) and in **Phase 4** skip + sections that are already complete. Only run the section cycle for missing/ + incomplete sections. +7. **Never overwrite existing section content.** Use Edit tool to replace only + `[To be designed]` placeholders or empty section bodies. + +If NOT in retrofit mode, normalize the system name to kebab-case for the +filename (e.g., "combat system" becomes `combat-system`). + +--- + +## 2. Gather Context (Read Phase) + +Read all relevant context **before** asking the user anything. This is the skill's +primary advantage over ad-hoc design — it arrives informed. + +### 2a: Required Reads + +- **Game concept**: Read `design/gdd/game-concept.md` — fail if missing: + > "No game concept found. Run `/brainstorm` first." +- **Systems index**: Read `design/gdd/systems-index.md` — fail if missing: + > "No systems index found. 
Run `/map-systems` first to map your systems." +- **Target system**: Find the system in the index. If not listed, warn: + > "[system-name] is not in the systems index. Would you like to add it, or + > design it as an off-index system?" +- **Entity registry**: Read `design/registry/entities.yaml` if it exists. + Extract all entries referenced by or relevant to this system (grep + `referenced_by.*[system-name]` and `source.*[system-name]`). Hold these + in context as **known facts** — values that other GDDs have already + established and this GDD must not contradict. +- **Reflexion log**: Read `docs/consistency-failures.md` if it exists. + Extract entries whose Domain matches this system's category. These are + recurring conflict patterns — present them under "Past failure patterns" + in the Phase 2d context summary so the user knows where mistakes have + occurred before in this domain. + +### 2b: Dependency Reads + +From the systems index, identify: +- **Upstream dependencies**: Systems this one depends on. Read their GDDs if they + exist (these contain decisions this system must respect). +- **Downstream dependents**: Systems that depend on this one. Read their GDDs if + they exist (these contain expectations this system must satisfy). 
+ +For each dependency GDD that exists, extract and hold in context: +- Key interfaces (what data flows between the systems) +- Formulas that reference this system's outputs +- Edge cases that assume this system's behavior +- Tuning knobs that feed into this system + +### 2c: Optional Reads + +- **Game pillars**: Read `design/gdd/game-pillars.md` if it exists +- **Existing GDD**: Read `design/gdd/[system-name].md` if it exists (resume, don't + restart from scratch) +- **Related GDDs**: Glob `design/gdd/*.md` and read any that are thematically related + (e.g., if designing a system that overlaps with another in scope, read the related GDD + even if it's not a formal dependency) + +### 2d: Present Context Summary + +Before starting design work, present a brief summary to the user: + +> **Designing: [System Name]** +> - Priority: [from index] | Layer: [from index] +> - Depends on: [list, noting which have GDDs vs. undesigned] +> - Depended on by: [list, noting which have GDDs vs. undesigned] +> - Existing decisions to respect: [key constraints from dependency GDDs] +> - Pillar alignment: [which pillar(s) this system primarily serves] +> - **Known cross-system facts (from registry):** +> - [entity_name]: [attribute]=[value], [attribute]=[value] (owned by [source GDD]) +> - [item_name]: [attribute]=[value], [attribute]=[value] (owned by [source GDD]) +> - [formula_name]: variables=[list], output=[min–max] (owned by [source GDD]) +> - [constant_name]: [value] [unit] (owned by [source GDD]) +> *(These values are locked — if this GDD needs different values, surface +> the conflict before writing. Do not silently use different numbers.)* +> +> If no registry entries are relevant: omit the "Known cross-system facts" section. + +If any upstream dependencies are undesigned, warn: +> "[dependency] doesn't have a GDD yet. We'll need to make assumptions about +> its interface. Consider designing it first, or we can define the expected +> contract and flag it as provisional." 
+ +### 2e: Technical Feasibility Pre-Check + +Before asking the user to begin designing, load engine context and surface any +constraints or knowledge gaps that will shape the design. + +**Step 1 — Determine the engine domain for this system:** +Map the system's category (from systems-index.md) to an engine domain: + +| System Category | Engine Domain | +|----------------|--------------| +| Combat, physics, collision | Physics | +| Rendering, visual effects, shaders | Rendering | +| UI, HUD, menus | UI | +| Audio, sound, music | Audio | +| AI, pathfinding, behavior trees | Navigation / Scripting | +| Animation, IK, rigs | Animation | +| Networking, multiplayer, sync | Networking | +| Input, controls, keybinding | Input | +| Save/load, persistence, data | Core | +| Dialogue, quests, narrative | Scripting | + +**Step 2 — Read engine context (if available):** +- Read `.claude/docs/technical-preferences.md` to identify the engine and version +- If engine is configured, read `docs/engine-reference/[engine]/VERSION.md` +- Read `docs/engine-reference/[engine]/modules/[domain].md` if it exists +- Read `docs/engine-reference/[engine]/breaking-changes.md` for domain-relevant entries +- Glob `docs/architecture/adr-*.md` and read any ADRs whose domain matches + (check the Engine Compatibility table's "Domain" field) + +**Step 3 — Present the Feasibility Brief:** + +If engine reference docs exist, present before starting design: + +``` +## Technical Feasibility Brief: [System Name] +Engine: [name + version] +Domain: [domain] + +### Known Engine Capabilities (verified for [version]) +- [capability relevant to this system] +- [capability 2] + +### Engine Constraints That Will Shape This Design +- [constraint from engine-reference or existing ADR] + +### Knowledge Gaps (verify before committing to these) +- [post-cutoff feature this design might rely on — mark HIGH/MEDIUM risk] + +### Existing ADRs That Constrain This System +- ADR-XXXX: [decision summary] — means [implication for 
this GDD] + (or "None yet") +``` + +If no engine reference docs exist (engine not yet configured), show a short note: +> "No engine configured yet — skipping technical feasibility check. Run +> `/setup-engine` before moving to architecture if you haven't already." + +**Step 4 — Ask before proceeding:** + +Use `AskUserQuestion`: +- "Any constraints to add before we begin, or shall we proceed with these noted?" + - Options: "Proceed with these noted", "Add a constraint first", "I need to check the engine docs — pause here" + +--- + +Use `AskUserQuestion`: +- "Ready to start designing [system-name]?" + - Options: "Yes, let's go", "Show me more context first", "Design a dependency first" + +--- + +## 3. Create File Skeleton + +Once the user confirms, **immediately** create the GDD file with empty section +headers. This ensures incremental writes have a target. + +Use the template structure from `.claude/docs/templates/game-design-document.md`: + +```markdown +# [System Name] + +> **Status**: In Design +> **Author**: [user + agents] +> **Last Updated**: [today's date] +> **Implements Pillar**: [from context] + +## Overview + +[To be designed] + +## Player Fantasy + +[To be designed] + +## Detailed Design + +### Core Rules + +[To be designed] + +### States and Transitions + +[To be designed] + +### Interactions with Other Systems + +[To be designed] + +## Formulas + +[To be designed] + +## Edge Cases + +[To be designed] + +## Dependencies + +[To be designed] + +## Tuning Knobs + +[To be designed] + +## Visual/Audio Requirements + +[To be designed] + +## UI Requirements + +[To be designed] + +## Acceptance Criteria + +[To be designed] + +## Open Questions + +[To be designed] +``` + +Ask: "May I create the skeleton file at `design/gdd/[system-name].md`?" + +After writing, update `production/session-state/active.md`: +- Use Glob to check if the file exists. +- If it **does not exist**: use the **Write** tool to create it. Never attempt Edit on a file that may not exist. 
+- If it **already exists**: use the **Edit** tool to update the relevant fields. + +File content: +- Task: Designing [system-name] GDD +- Current section: Starting (skeleton created) +- File: design/gdd/[system-name].md + +--- + +## 4. Section-by-Section Design + +Walk through each section in order. For **each section**, follow this cycle: + +### The Section Cycle + +``` +Context -> Questions -> Options -> Decision -> Draft -> Approval -> Write +``` + +1. **Context**: State what this section needs to contain, and surface any relevant + decisions from dependency GDDs that constrain it. + +2. **Questions**: Ask clarifying questions specific to this section. Use + `AskUserQuestion` for constrained questions, conversational text for open-ended + exploration. + +3. **Options**: Where the section involves design choices (not just documentation), + present 2-4 approaches with pros/cons. Explain reasoning in conversation text, + then use `AskUserQuestion` to capture the decision. + +4. **Decision**: User picks an approach or provides custom direction. + +5. **Draft**: Write the section content in conversation text for review. Flag any + provisional assumptions about undesigned dependencies. + +6. **Approval**: Immediately after the draft — in the SAME response — use + `AskUserQuestion`. **NEVER use plain text. NEVER skip this step.** + - Prompt: "Approve the [Section Name] section?" + - Options: `[A] Approve — write it to file` / `[B] Make changes — describe what to fix` / `[C] Start over` + + **The draft and the approval widget MUST appear together in one response. + If the draft appears without the widget, the user is left at a blank prompt + with no path forward — this is a protocol violation.** + +7. **Write**: Use the Edit tool to replace the placeholder with the approved content. 
+ **CRITICAL**: Always include the section heading in the `old_string` to ensure + uniqueness — never match `[To be designed]` alone, as multiple sections use the + same placeholder and the Edit tool requires a unique match. Use this pattern: + ``` + old_string: "## [Section Name]\n\n[To be designed]" + new_string: "## [Section Name]\n\n[approved content]" + ``` + Confirm the write. + +8. **Registry conflict check** (Sections C and D only — Detailed Design and Formulas): + After writing, scan the section content for entity names, item names, formula + names, and numeric constants that appear in the registry. For each match: + - Compare the value just written against the registry entry. + - If they differ: **surface the conflict immediately** before starting the next + section. Do not continue silently. + > "Registry conflict: [name] is registered in [source GDD] as [registry_value]. + > This section just wrote [new_value]. Which is correct?" + - If new (not in registry): flag it as a candidate for registry registration + (will be handled in Phase 5). + +After writing each section, update `production/session-state/active.md` with the +completed section name. Use Glob to check if the file exists — use Write to create +it if absent, Edit to update it if present. + +### Section-Specific Guidance + +Each section has unique design considerations and may benefit from specialist agents: + +--- + +### Section A: Overview + +**Goal**: One paragraph a stranger could read and understand. + +**Derive recommended options before building the widget**: Read the system's category and layer from the systems index (already in context from Phase 2), then determine the recommended option for each tab: +- **Framing tab**: Foundation/Infrastructure layer → `[A]` recommended. Player-facing categories (Combat, UI, Dialogue, Character, Animation, Visual Effects, Audio) → `[C] Both` recommended. 
+- **ADR ref tab**: Glob `docs/architecture/adr-*.md` and grep for the system name in the GDD Requirements section of any ADR. If a matching ADR is found → `[A] Yes — cite the ADR` recommended. If none found → `[B] No` recommended. +- **Fantasy tab**: Foundation/Infrastructure layer → `[B] No` recommended. All other categories → `[A] Yes` recommended. + +Append `(Recommended)` to the appropriate option text in each tab. + +**Framing questions (ask BEFORE drafting)**: Use `AskUserQuestion` with a multi-tab widget: +- Tab "Framing" — "How should the overview frame this system?" Options: `[A] As a data/infrastructure layer (technical framing)` / `[B] Through its player-facing effect (design framing)` / `[C] Both — describe the data layer and its player impact` +- Tab "ADR ref" — "Should the overview reference the existing ADR for this system?" Options: `[A] Yes — cite the ADR for implementation details` / `[B] No — keep the GDD at pure design level` +- Tab "Fantasy" — "Does this system have a player fantasy worth stating?" Options: `[A] Yes — players feel it directly` / `[B] No — pure infrastructure, players feel what it enables` + +Use the user's answers to shape the draft. Do NOT answer these questions yourself and auto-draft. + +**Questions to ask**: +- What is this system in one sentence? +- How does a player interact with it? (active/passive/automatic) +- Why does this system exist — what would the game lose without it? + +**Cross-reference**: Check that the description aligns with how the systems index +describes it. Flag discrepancies. + +**Design vs. implementation boundary**: Overview questions must stay at the behavior +level — what the system *does*, not *how it is built*. If implementation questions +arise during the Overview (e.g., "Should this use an Autoload singleton or a signal +bus?"), note them as "→ becomes an ADR" and move on. Implementation patterns belong +in `/architecture-decision`, not the GDD. 
The GDD describes behavior; the ADR +describes the technical approach used to achieve it. + +--- + +### Section B: Player Fantasy + +**Goal**: The emotional target — what the player should *feel*. + +**Derive recommended option before building the widget**: Read the system's category and layer from Phase 2 context: +- Player-facing categories (Combat, UI, Dialogue, Character, Animation, Audio, Level/World) → `[A] Direct` recommended +- Foundation/Infrastructure layer → `[B] Indirect` recommended +- Mixed categories (Camera/input, Economy, AI with visible player effects) → `[C] Both` recommended + +Append `(Recommended)` to the appropriate option text. + +**Framing question (ask BEFORE drafting)**: Use `AskUserQuestion`: +- Prompt: "Is this system something the player engages with directly, or infrastructure they experience indirectly?" +- Options: `[A] Direct — player actively uses or feels this system` / `[B] Indirect — player experiences the effects, not the system` / `[C] Both — has a direct interaction layer and infrastructure beneath it` + +Use the answer to frame the Player Fantasy section appropriately. Do NOT assume the answer. + +**Questions to ask**: +- What emotion or power fantasy does this serve? +- What reference games nail this feeling? What specifically creates it? +- Is this a "system you love engaging with" or "infrastructure you don't notice"? + +**Cross-reference**: Must align with the game pillars. If the system serves a pillar, +quote the relevant pillar text. + +**Agent delegation (MANDATORY)**: After the framing answer is given but before drafting, +spawn `creative-director` via Task: +- Provide: system name, framing answer (direct/indirect/both), game pillars, any reference games the user mentioned, the game concept summary +- Ask: "Shape the Player Fantasy for this system. What emotion or power fantasy should it serve? What player moment should we anchor to? What tone and language fits the game's established feeling? 
Be specific — give me 2-3 candidate framings." +- Collect the creative-director's framings and present them to the user alongside the draft. + +**Do NOT draft Section B without first consulting `creative-director`.** The framing +answer tells us *what kind* of fantasy it is; the creative-director shapes *how it's +described* — tone, language, the specific player moment to anchor to. + +--- + +### Section C: Detailed Design (Core Rules, States, Interactions) + +**Goal**: Unambiguous specification a programmer could implement without questions. + +This is usually the largest section. Break it into sub-sections: + +1. **Core Rules**: The fundamental mechanics. Use numbered rules for sequential + processes, bullets for properties. +2. **States and Transitions**: If the system has states, map every state and + every valid transition. Use a table. +3. **Interactions with Other Systems**: For each dependency (upstream and downstream), + specify what data flows in, what flows out, and who owns the interface. + +**Questions to ask**: +- Walk me through a typical use of this system, step by step +- What are the decision points the player faces? +- What can the player NOT do? (Constraints are as important as capabilities) + +**Agent delegation (MANDATORY)**: Before drafting Section C, spawn specialist agents via Task in parallel: +- Look up the system category in the routing table (Section 6 of this skill) +- Spawn the Primary Agent AND Supporting Agent(s) listed for this category +- Provide each agent: system name, game concept summary, pillar set, dependency GDD excerpts, the specific section being worked on +- Collect their findings before drafting +- Surface any disagreements between agents to the user via `AskUserQuestion` +- Draft only after receiving specialist input + +**Do NOT draft Section C without first consulting the appropriate specialists.** A `systems-designer` reviewing rules and mechanics will catch design gaps the main session cannot. 
+ +**Cross-reference**: For each interaction listed, verify it matches what the +dependency GDD specifies. If a dependency defines a value or formula and this +system expects something different, flag the conflict. + +--- + +### Section D: Formulas + +**Goal**: Every mathematical formula, with variables defined, ranges specified, +and edge cases noted. + +**Completion Steering — always begin each formula with this exact structure:** + +``` +The [formula_name] formula is defined as: + +`[formula_name] = [expression]` + +**Variables:** +| Variable | Symbol | Type | Range | Description | +|----------|--------|------|-------|-------------| +| [name] | [sym] | float/int | [min–max] | [what it represents] | + +**Output Range:** [min] to [max] under normal play; [behaviour at extremes] +**Example:** [worked example with real numbers] +``` + +Do NOT write `[Formula TBD]` or describe a formula in prose without the variable +table. A formula without defined variables cannot be implemented without guesswork. + +**Questions to ask**: +- What are the core calculations this system performs? +- Should scaling be linear, logarithmic, or stepped? +- What should the output ranges be at early/mid/late game? + +**Agent delegation (MANDATORY)**: Before proposing any formulas or balance values, spawn specialist agents via Task in parallel: +- **Always spawn `systems-designer`**: provide Core Rules from Section C, tuning goals from user, balance context from dependency GDDs. Ask them to propose formulas with variable tables and output ranges. +- **For economy/cost systems, also spawn `economy-designer`**: provide placement costs, upgrade cost intent, and progression goals. Ask them to validate cost curves and ratios. 
+- Present the specialists' proposals to the user for review via `AskUserQuestion` +- The user decides; the main session writes to file +- **Do NOT invent formula values or balance numbers without specialist input.** A user without balance design expertise cannot evaluate raw numbers — they need the specialists' reasoning. + +**Cross-reference**: If a dependency GDD defines a formula whose output feeds into +this system, reference it explicitly. Don't reinvent — connect. + +--- + +### Section E: Edge Cases + +**Goal**: Explicitly handle unusual situations so they don't become bugs. + +**Completion Steering — format each edge case as:** +- **If [condition]**: [exact outcome]. [rationale if non-obvious] + +Example (adapt terminology to the game's domain): +- **If [resource] reaches 0 while [protective condition] is active**: hold at minimum until condition ends, then apply consequence. +- **If two [triggers/events] fire simultaneously**: resolve in [defined priority order]; ties use [defined tiebreak rule]. + +Do NOT write vague entries like "handle appropriately" — each must name the exact +condition and the exact resolution. An edge case without a resolution is an open +design question, not a specification. + +**Questions to ask**: +- What happens at zero? At maximum? At out-of-range values? +- What happens when two rules apply at the same time? +- What happens if a player finds an unintended interaction? (Identify degenerate strategies) + +**Agent delegation (MANDATORY)**: Spawn `systems-designer` via Task before finalising edge cases. Provide: the completed Sections C and D, and ask them to identify edge cases from the formula and rule space that the main session may have missed. For narrative systems, also spawn `narrative-director`. Present their findings and ask the user which to include. + +**Cross-reference**: Check edge cases against dependency GDDs. If a dependency +defines a floor, cap, or resolution rule that this system could violate, flag it. 
+ +--- + +### Section F: Dependencies + +**Goal**: Map every system connection with direction and nature. + +This section is partially pre-filled from the context gathering phase. Present the +known dependencies from the systems index and ask: +- Are there dependencies I'm missing? +- For each dependency, what's the specific data interface? +- Which dependencies are hard (system cannot function without it) vs. soft + (enhanced by it but works without it)? + +**Cross-reference**: This section must be bidirectionally consistent. If this system +lists "depends on Combat", then the Combat GDD should list "depended on by [this +system]". Flag any one-directional dependencies for correction. + +--- + +### Section G: Tuning Knobs + +**Goal**: Every designer-adjustable value, with safe ranges and extreme behaviors. + +**Questions to ask**: +- What values should designers be able to tweak without code changes? +- For each knob, what breaks if it's set too high? Too low? +- Which knobs interact with each other? (Changing A makes B irrelevant) + +**Agent delegation**: If formulas are complex, delegate to `systems-designer` +to derive tuning knobs from the formula variables. + +**Cross-reference**: If a dependency GDD lists tuning knobs that affect this system, +reference them here. Don't create duplicate knobs — point to the source of truth. + +--- + +### Section H: Acceptance Criteria + +**Goal**: Testable conditions that prove the system works as designed. + +**Completion Steering — format each criterion as Given-When-Then:** +- **GIVEN** [initial state], **WHEN** [action or trigger], **THEN** [measurable outcome] + +Example (adapt terminology to the game's domain): +- **GIVEN** [initial state], **WHEN** [player action or system trigger], **THEN** [specific measurable outcome]. +- **GIVEN** [a constraint is active], **WHEN** [player attempts an action], **THEN** [feedback shown and action result]. 
+ +Include at least: one criterion per core rule from Section C, and one per formula +from Section D. Do NOT write "the system works as designed" — every criterion must +be independently verifiable by a QA tester without reading the GDD. + +**Agent delegation (MANDATORY)**: Spawn `qa-lead` via Task before finalising acceptance criteria. Provide: the completed GDD sections C, D, E, and ask them to validate that the criteria are independently testable and cover all core rules and formulas. Surface any gaps or untestable criteria to the user. + +**Questions to ask**: +- What's the minimum set of tests that prove this works? +- What performance budget does this system get? (frame time, memory) +- What would a QA tester check first? + +**Cross-reference**: Include criteria that verify cross-system interactions work, +not just this system in isolation. + +--- + +### Optional Sections: Visual/Audio, UI Requirements, Open Questions + +These sections are included in the template. Visual/Audio is **REQUIRED** for visual system categories — not optional. Determine the requirement level before asking: + +**Visual/Audio is REQUIRED (mandatory — do not offer to skip) for these system categories:** +- Combat, damage, health +- UI systems (HUD, menus) +- Animation, character movement +- Visual effects, particles, shaders +- Character systems +- Dialogue, quests, lore +- Level/world systems + +For required systems: **spawn `art-director` via Task** before drafting this section. Provide: system name, game concept, game pillars, art bible sections 1–4 if they exist. Ask them to specify: (1) VFX and visual feedback requirements for this system's events, (2) any animation or visual style constraints, (3) which art bible principles most directly apply to this system. Present their output; do NOT leave this section as `[To be designed]` for visual systems. 
+ +For **all other system categories** (Foundation/Infrastructure, Economy, AI/pathfinding, Camera/input), offer the optional sections after the required sections: + +Use `AskUserQuestion`: +- "The 8 required sections are complete. Do you want to also define Visual/Audio + requirements, UI requirements, or capture open questions?" + - Options: "Yes, all three", "Just open questions", "Skip — I'll add these later" + +For **Visual/Audio** (non-required systems): Coordinate with `art-director` and `audio-director` if detail is needed. Often a brief note suffices at the GDD stage. + +> **Asset Spec Flag**: After the Visual/Audio section is written with real content, output this notice: +> "📌 **Asset Spec** — Visual/Audio requirements are defined. After the art bible is approved, run `/asset-spec system:[system-name]` to produce per-asset visual descriptions, dimensions, and generation prompts from this section." + +For **UI Requirements**: Coordinate with `ux-designer` for complex UI systems. +After writing this section, check whether it contains real content (not just +`[To be designed]` or a note that this system has no UI). If it does have real +UI requirements, output this flag immediately: + +> **📌 UX Flag — [System Name]**: This system has UI requirements. In Phase 4 +> (Pre-Production), run `/ux-design` to create a UX spec for each screen or +> HUD element this system contributes to **before** writing epics. Stories that +> reference UI should cite `design/ux/[screen].md`, not the GDD directly. +> +> Note this in the systems index for this system if you update it. + +For **Open Questions**: Capture anything that came up during design that wasn't +fully resolved. Each question should have an owner and target resolution date. + +--- + +## 5. Post-Design Validation + +After all sections are written: + +### 5a: Self-Check + +Read back the complete GDD from file (not from conversation memory — the file is +the source of truth). 
Verify: +- All 8 required sections have real content (not placeholders) +- Formulas reference defined variables +- Edge cases have resolutions +- Dependencies are listed with interfaces +- Acceptance criteria are testable + +### 5a-bis: Creative Director Pillar Review + +**Review mode check** — apply before spawning CD-GDD-ALIGN: +- `solo` → skip. Note: "CD-GDD-ALIGN skipped — Solo mode." Proceed to Step 5b. +- `lean` → skip (not a PHASE-GATE). Note: "CD-GDD-ALIGN skipped — Lean mode." Proceed to Step 5b. +- `full` → spawn as normal. + +Before finalizing the GDD, spawn `creative-director` via Task using gate **CD-GDD-ALIGN** (`.claude/docs/director-gates.md`). + +Pass: completed GDD file path, game pillars (from `design/gdd/game-concept.md` or `design/gdd/game-pillars.md`), MDA aesthetics target. + +Handle verdict per the standard rules in `director-gates.md`. After resolution, record the verdict in the GDD Status header: +`> **Creative Director Review (CD-GDD-ALIGN)**: APPROVED [date] / CONCERNS (accepted) [date] / REVISED [date]` + +--- + +### 5b: Update Entity Registry + +Scan the completed GDD for cross-system facts that should be registered: +- Named entities (enemies, NPCs, bosses) with stats or drops +- Named items with values, weights, or categories +- Named formulas with defined variables and output ranges +- Named constants referenced by value in more than one place + +For each candidate, check if it already exists in `design/registry/entities.yaml`: +``` +Grep pattern=" - name: [candidate_name]" path="design/registry/entities.yaml" +``` + +Present a summary: +``` +Registry candidates from this GDD: + NEW (not yet registered): + - [entity_name] [entity]: [attribute]=[value], [attribute]=[value] + - [item_name] [item]: [attribute]=[value], [attribute]=[value] + - [formula_name] [formula]: variables=[list], output=[min–max] + ALREADY REGISTERED (referenced_by will be updated): + - [constant_name] [constant]: value=[N] ← matches registry ✅ +``` + +Ask: "May 
I update `design/registry/entities.yaml` with these [N] new entries
+and update `referenced_by` for the existing entries?"
+
+If yes: append new entries and update `referenced_by` arrays. Never modify
+existing `value` / attribute fields without surfacing it as a conflict first.
+
+### 5c: Offer Design Review
+
+Present a completion summary:
+
+> **GDD Complete: [System Name]**
+> - Sections written: [list]
+> - Provisional assumptions: [list any assumptions about undesigned dependencies]
+> - Cross-system conflicts found: [list or "none"]
+
+> **To validate this GDD, open a fresh Claude Code session and run:**
+> `/design-review design/gdd/[system-name].md`
+>
+> **Never run `/design-review` in the same session as `/design-system`.** The reviewing
+> agent must be independent of the authoring context. Running it here would inherit
+> the full design history, making independent critique impossible.
+
+**NEVER offer to run `/design-review` inline.** Always direct the user to a fresh window.
+
+### 5d: Update Systems Index
+
+After the GDD is complete (and optionally reviewed):
+
+- Read the systems index
+- Update the target system's row:
+  - If design-review was run and verdict is APPROVED: Status → "Approved"
+  - If design-review was run and verdict is NEEDS REVISION: Status → "In Review"
+  - If design-review was skipped: Status → "Designed" (pending review)
+  - If the user chose "I'll review it myself first": Status → "Designed"
+  - Design Doc: link to `design/gdd/[system-name].md`
+- Update the Progress Tracker counts
+
+Ask: "May I update the systems index at `design/gdd/systems-index.md`?"
+
+### 5e: Update Session State
+
+Update `production/session-state/active.md` with:
+- Task: [system-name] GDD
+- Status: Complete (or In Review if design-review was run)
+- File: design/gdd/[system-name].md
+- Sections: All 8 written
+- Next: [suggest next system from design order]
+
+### 5f: Suggest Next Steps
+
+Use `AskUserQuestion`:
+- "What's next?"
+ - Options: + - "Run `/consistency-check` — verify this GDD's values don't conflict with existing GDDs (recommended before designing the next system)" + - "Design next system ([next-in-order])" — if undesigned systems remain + - "Fix review findings" — if design-review flagged issues + - "Stop here for this session" + - "Run `/gate-check`" — if enough MVP systems are designed + +--- + +## 6. Specialist Agent Routing + +This skill delegates to specialist agents for domain expertise. The main session +orchestrates the overall flow; agents provide expert content. + +| System Category | Primary Agent | Supporting Agent(s) | +|----------------|---------------|---------------------| +| **Foundation/Infrastructure** (event bus, save/load, scene mgmt, service locator) | `systems-designer` | `gameplay-programmer` (feasibility), `engine-programmer` (engine integration) | +| Combat, damage, health | `game-designer` | `systems-designer` (formulas), `ai-programmer` (enemy AI), `art-director` (hit feedback visual direction, VFX intent) | +| Economy, loot, crafting | `economy-designer` | `systems-designer` (curves), `game-designer` (loops) | +| Progression, XP, skills | `game-designer` | `systems-designer` (curves), `economy-designer` (sinks) | +| Dialogue, quests, lore | `game-designer` | `narrative-director` (story), `writer` (content), `art-director` (character visual profiles, cinematic tone) | +| UI systems (HUD, menus) | `game-designer` | `ux-designer` (flows), `ui-programmer` (feasibility), `art-director` (visual style direction), `technical-artist` (render/shader constraints) | +| Audio systems | `game-designer` | `audio-director` (direction), `sound-designer` (specs) | +| AI, pathfinding, behavior | `game-designer` | `ai-programmer` (implementation), `systems-designer` (scoring) | +| Level/world systems | `game-designer` | `level-designer` (spatial), `world-builder` (lore) | +| Camera, input, controls | `game-designer` | `ux-designer` (feel), `gameplay-programmer` 
(feasibility) | +| Animation, character movement | `game-designer` | `art-director` (animation style, pose language), `technical-artist` (rig/blend constraints), `gameplay-programmer` (feel) | +| Visual effects, particles, shaders | `game-designer` | `art-director` (VFX visual direction), `technical-artist` (performance budget, shader complexity), `systems-designer` (trigger/state integration) | +| Character systems (stats, archetypes) | `game-designer` | `art-director` (character visual archetype), `narrative-director` (character arc alignment), `systems-designer` (stat formulas) | + +**When delegating via Task tool**: +- Provide: system name, game concept summary, dependency GDD excerpts, the specific + section being worked on, and what question needs expert input +- The agent returns analysis/proposals to the main session +- The main session presents the agent's output to the user via `AskUserQuestion` +- The user decides; the main session writes to file +- Agents do NOT write to files directly — the main session owns all file writes + +--- + +## 7. Recovery & Resume + +If the session is interrupted (compaction, crash, new session): + +1. Read `production/session-state/active.md` — it records the current system and + which sections are complete +2. Read `design/gdd/[system-name].md` — sections with real content are done; + sections with `[To be designed]` still need work +3. Resume from the next incomplete section — no need to re-discuss completed ones + +This is why incremental writing matters: every approved section survives any +disruption. + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle at every step: + +1. **Question -> Options -> Decision -> Draft -> Approval** for every section +2. **AskUserQuestion** at every decision point (Explain -> Capture pattern): + - Phase 2: "Ready to start, or need more context?" + - Phase 3: "May I create the skeleton?" 
+ - Phase 4 (each section): Design questions, approach options, draft approval + - Phase 5: "Run design review? Update systems index? What's next?" +3. **"May I write to [filepath]?"** before the skeleton and before each section write +4. **Incremental writing**: Each section is written to file immediately after approval +5. **Session state updates**: After every section write +6. **Cross-referencing**: Every section checks existing GDDs for conflicts +7. **Specialist routing**: Complex sections get expert agent input, presented to + the user for decision — never written silently + +**Never** auto-generate the full GDD and present it as a fait accompli. +**Never** write a section without user approval. +**Never** contradict an existing approved GDD without flagging the conflict. +**Always** show where decisions come from (dependency GDDs, pillars, user choices). + +## Context Window Awareness + +This is a long-running skill. After writing each section, check if the status line +shows context at or above 70%. If so, append this notice to the response: + +> **Context is approaching the limit (≥70%).** Your progress is saved — all approved +> sections are written to `design/gdd/[system-name].md`. When you're ready to continue, +> open a fresh Claude Code session and run `/design-system [system-name]` — it will +> detect which sections are complete and resume from the next one. 
+ +--- + +## Recommended Next Steps + +- Run `/design-review design/gdd/[system-name].md` in a **fresh session** to validate the completed GDD independently +- Run `/consistency-check` to verify this GDD's values don't conflict with other GDDs +- Run `/map-systems next` to move to the next highest-priority undesigned system +- Run `/gate-check pre-production` when all MVP GDDs are authored and reviewed diff --git a/.omc/skills/dev-story/SKILL.md b/.omc/skills/dev-story/SKILL.md new file mode 100644 index 0000000..cbf4fbe --- /dev/null +++ b/.omc/skills/dev-story/SKILL.md @@ -0,0 +1,323 @@ +--- +name: dev-story +description: "Read a story file and implement it. Loads the full context (story, GDD requirement, ADR guidelines, control manifest), routes to the right programmer agent for the system and engine, implements the code and test, and confirms each acceptance criterion. The core implementation skill — run after /story-readiness, before /code-review and /story-done." +argument-hint: "[story-path]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash, Task, AskUserQuestion +--- + +# Dev Story + +This skill bridges planning and code. It reads a story file in full, assembles +all the context a programmer needs, routes to the correct specialist agent, and +drives implementation to completion — including writing the test. + +**The loop for every story:** +``` +/qa-plan sprint ← define test requirements before sprint begins +/story-readiness [path] ← validate before starting +/dev-story [path] ← implement it (this skill) +/code-review [files] ← review it +/story-done [path] ← verify and close it +``` + +**After all sprint stories are done:** run `/team-qa sprint` to execute the full QA cycle and get a sign-off verdict before advancing the project stage. + +**Output:** Source code + test file in the project's `src/` and `tests/` directories. + +--- + +## Phase 1: Find the Story + +**If a path is provided**: read that file directly. 
+ +**If no argument**: check `production/session-state/active.md` for the active +story. If found, confirm: "Continuing work on [story title] — is that correct?" +If not found, ask: "Which story are we implementing?" Glob +`production/epics/**/*.md` and list stories with Status: Ready. + +--- + +## Phase 2: Load Full Context + +**Before loading any context, verify required files exist.** Extract the ADR path from the story's `ADR Governing Implementation` field, then check: + +| File | Path | If missing | +|------|------|------------| +| TR registry | `docs/architecture/tr-registry.yaml` | **STOP** — "TR registry not found. Run `/create-epics` to generate it." | +| Governing ADR | path from story's ADR field | **STOP** — "ADR file [path] not found. Run `/architecture-decision` to create it, or correct the filename in the story's ADR field." | +| Control manifest | `docs/architecture/control-manifest.md` | **WARN and continue** — "Control manifest not found — layer rules cannot be checked. Run `/create-control-manifest`." | + +If the TR registry or governing ADR is missing, set the story status to **BLOCKED** in the session state and do not spawn any programmer agent. + +Read all of the following simultaneously — these are independent reads. Do not start implementation until all context is loaded: + +### The story file +Extract and hold: +- **Story title, ID, layer, type** (Logic / Integration / Visual/Feel / UI / Config/Data) +- **TR-ID** — the GDD requirement identifier +- **Governing ADR** reference +- **Manifest Version** embedded in story header +- **Acceptance Criteria** — every checkbox item, verbatim +- **Implementation Notes** — the ADR guidance section in the story +- **Out of Scope** boundaries +- **Test Evidence** — the required test file path +- **Dependencies** — what must be DONE before this story + +### The TR registry +Read `docs/architecture/tr-registry.yaml`. Look up the story's TR-ID. 
+Read the current `requirement` text — this is the source of truth for what the +GDD requires now. Do not rely on any inline text in the story file (may be stale). + +### The governing ADR +Read `docs/architecture/[adr-file].md`. Extract: +- The full Decision section +- The Implementation Guidelines section (this is what the programmer follows) +- The Engine Compatibility section (post-cutoff APIs, known risks) +- The ADR Dependencies section + +### The control manifest +Read `docs/architecture/control-manifest.md`. Extract the rules for this story's layer: +- Required patterns +- Forbidden patterns +- Performance guardrails + +Check: does the story's embedded Manifest Version match the current manifest header date? +If they differ, use `AskUserQuestion` before proceeding: +- Prompt: "Story was written against manifest v[story-date]. Current manifest is v[current-date]. New rules may apply. How do you want to proceed?" +- Options: + - `[A] Update story manifest version and implement with current rules (Recommended)` + - `[B] Implement with old rules — I accept the risk of non-compliance` + - `[C] Stop here — I want to review the manifest diff first` + +If [A]: edit the story file's `Manifest Version:` field to the current manifest date before spawning the programmer. Then read the manifest carefully for new rules. +If [B]: read the manifest carefully for new rules anyway, and note the version mismatch in the Phase 6 summary under "Deviations". +If [C]: stop. Do not spawn any agent. Let the user review and re-run `/dev-story`. + +### Dependency validation + +After extracting the **Dependencies** list from the story file, validate each: + +1. Glob `production/epics/**/*.md` to find each dependency story file. +2. Read its `Status:` field. +3. If any dependency has Status other than `Complete` or `Done`: + - Use `AskUserQuestion`: + - Prompt: "Story '[current story]' depends on '[dependency title]' which is currently [status], not Complete. 
How do you want to proceed?" + - Options: + - `[A] Proceed anyway — I accept the dependency risk` + - `[B] Stop — I'll complete the dependency first` + - `[C] The dependency is done but status wasn't updated — mark it Complete and continue` + - If [B]: set story status to **BLOCKED** in session state and stop. Do not spawn any programmer agent. + - If [C]: ask "May I update [dependency path] Status to Complete?" before continuing. + - If [A]: note in Phase 6 summary under "Deviations": "Implemented with incomplete dependency: [dependency title] — [status]." + +If a dependency file cannot be found: warn "Dependency story not found: [path]. Verify the path or create the story file." + +--- + +### Engine reference +Read `.claude/docs/technical-preferences.md`: +- `Engine:` value — determines which programmer agents to use +- Naming conventions (class names, file names, signal/event names) +- Performance budgets (frame budget, memory ceiling) +- Forbidden patterns + +--- + +## Phase 3: Route to the Right Programmer + +Based on the story's **Layer**, **Type**, and **system name**, determine which +specialist to spawn via Task. + +**Config/Data stories — skip agent spawning entirely:** +If the story's Type is `Config/Data`, no programmer agent or engine specialist is needed. Jump directly to Phase 4 (Config/Data note). The implementation is a data file edit — no routing table evaluation, no engine specialist. 
+ +### Primary agent routing table + +| Story context | Primary agent | +|---|---| +| Foundation layer — any type | `engine-programmer` | +| Any layer — Type: UI | `ui-programmer` | +| Any layer — Type: Visual/Feel | `gameplay-programmer` (implements) | +| Core or Feature — gameplay mechanics | `gameplay-programmer` | +| Core or Feature — AI behaviour, pathfinding | `ai-programmer` | +| Core or Feature — networking, replication | `network-programmer` | +| Config/Data — no code | No agent needed (see Phase 4 Config note) | + +### Engine specialist — always spawn as secondary for code stories + +Read the `Engine Specialists` section of `.claude/docs/technical-preferences.md` +to get the configured primary specialist. Spawn them alongside the primary agent +when the story involves engine-specific APIs, patterns, or the ADR has HIGH +engine risk. + +| Engine | Specialist agents available | +|--------|----------------------------| +| Godot 4 | `godot-specialist`, `godot-gdscript-specialist`, `godot-shader-specialist` | +| Unity | `unity-specialist`, `unity-ui-specialist`, `unity-shader-specialist` | +| Unreal Engine | `unreal-specialist`, `ue-gas-specialist`, `ue-blueprint-specialist`, `ue-umg-specialist`, `ue-replication-specialist` | + +**When engine risk is HIGH** (from the ADR or VERSION.md): always spawn the engine +specialist, even for non-engine-facing stories. High risk means the ADR records +assumptions about post-cutoff engine APIs that need expert verification. + +--- + +## Phase 4: Implement + +Spawn the chosen programmer agent(s) via Task with the full context package: + +Provide the agent with: +1. The complete story file content +2. The current GDD requirement text (from TR registry) +3. The ADR Decision + Implementation Guidelines (verbatim — do not summarise) +4. The control manifest rules for this layer +5. The engine naming conventions and performance budgets +6. Any engine-specific notes from the ADR Engine Compatibility section +7. 
The test file path that must be created +8. Explicit instruction: **implement this story and write the test** + +The agent should: +- Create or modify files in `src/` following the ADR guidelines +- Respect all Required and Forbidden patterns from the control manifest +- Stay within the story's Out of Scope boundaries (do not touch unrelated files) +- Write clean, doc-commented public APIs + +### Config/Data stories (no agent needed) + +For Type: Config/Data stories, no programmer agent is required. The implementation +is editing a data file. Read the story's acceptance criteria and make the specified +changes to the data file directly. Note which values were changed and what they +changed from/to. + +### Visual/Feel stories + +Spawn `gameplay-programmer` to implement the code/animation calls. Note that +Visual/Feel acceptance criteria cannot be auto-verified — the "does it feel right?" +check happens in `/story-done` via manual confirmation. + +--- + +## Phase 5: Write the Test + +For **Logic** and **Integration** stories, the test must be written as part of +this implementation — not deferred to later. + +Remind the programmer agent: + +> "The test file for this story is required at: `[path from Test Evidence section]`. +> The story cannot be closed via `/story-done` without it. Write the test +> alongside the implementation, not after." + +Test requirements (from coding-standards.md): +- File name: `[system]_[feature]_test.[ext]` +- Function names: `test_[scenario]_[expected_outcome]` +- Each acceptance criterion must have at least one test function covering it +- No random seeds, no time-dependent assertions, no external I/O +- Test the formula bounds from the GDD Formulas section + +For **Visual/Feel** and **UI** stories: no automated test. Remind the agent to +note in the implementation summary what manual evidence will be needed: +"Evidence doc required at `production/qa/evidence/[slug]-evidence.md`." + +For **Config/Data** stories: no test file. 
A smoke check will serve as evidence. + +--- + +## Phase 6: Collect and Summarise + +After the programmer agent(s) complete, collect: + +- Files created or modified (with paths) +- Test file created (path and number of test functions written) +- Any deviations from the story's Out of Scope boundary (flag these) +- Any questions or blockers the agent surfaced +- Any engine-specific risks the specialist flagged + +Present a concise implementation summary: + +``` +## Implementation Complete: [Story Title] + +**Files changed**: +- `src/[path]` — created / modified ([brief description]) +- `tests/[path]` — test file ([N] test functions) + +**Acceptance criteria covered**: +- [x] [criterion] — implemented in [file:function] +- [x] [criterion] — covered by test [test_name] +- [ ] [criterion] — DEFERRED: requires playtest (Visual/Feel) + +**Deviations from scope**: [None] or [list files touched outside story boundary] +**Engine risks flagged**: [None] or [specialist finding] +**Blockers**: [None] or [describe] + +Ready for: `/code-review [file1] [file2]` then `/story-done [story-path]` +``` + +--- + +## Phase 7: Update Session State + +Silently append to `production/session-state/active.md`: + +``` +## Session Extract — /dev-story [date] +- Story: [story-path] — [story title] +- Files changed: [comma-separated list] +- Test written: [path, or "None — Visual/Feel/Config story"] +- Blockers: [None, or description] +- Next: /code-review [files] then /story-done [story-path] +``` + +Create `active.md` if it does not exist. Confirm: "Session state updated." + +--- + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. 
**Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. + +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess +- Manifest version mismatch → show diff to user, ask whether to proceed with old rules or update story first + +## Collaborative Protocol + +- **File writes are delegated** — all source code, test files, and evidence docs are written by sub-agents spawned via Task. Each sub-agent enforces the "May I write to [path]?" protocol individually. This orchestrator does not write files directly. +- **Load before implementing** — do not start coding until all context is loaded + (story, TR-ID, ADR, manifest, engine prefs). Incomplete context produces code + that drifts from design. +- **The ADR is the law** — implementation must follow the ADR's Implementation + Guidelines. If the guidelines conflict with what seems "better," flag it in the + summary rather than silently deviating. +- **Stay in scope** — the Out of Scope section is a contract. If implementing + the story requires touching an out-of-scope file, stop and surface it: + "Implementing [criterion] requires modifying [file], which is out of scope. + Shall I proceed or create a separate story?" 
+- **Test is not optional for Logic/Integration** — do not mark implementation + complete without the test file existing +- **Visual/Feel criteria are deferred, not skipped** — mark them as DEFERRED + in the summary; they will be manually verified in `/story-done` +- **Ask before large structural decisions** — if the story requires an + architectural pattern not covered by the ADR, surface it before implementing: + "The ADR doesn't specify how to handle [case]. My plan is [X]. Proceed?" + +--- + +## Recommended Next Steps + +- Run `/code-review [file1] [file2]` to review the implementation before closing the story +- Run `/story-done [story-path]` to verify acceptance criteria and mark the story complete +- After all sprint stories are done: run `/team-qa sprint` for the full QA cycle before advancing the project stage diff --git a/.omc/skills/estimate/SKILL.md b/.omc/skills/estimate/SKILL.md new file mode 100644 index 0000000..f1a6068 --- /dev/null +++ b/.omc/skills/estimate/SKILL.md @@ -0,0 +1,131 @@ +--- +name: estimate +description: "Estimates task effort by analyzing complexity, dependencies, historical velocity, and risk factors. Produces a structured estimate with confidence levels." +argument-hint: "[task-description]" +user-invocable: true +allowed-tools: Read, Glob, Grep +--- + +## Phase 1: Understand the Task + +Read the task description from the argument. If the description is too vague to estimate meaningfully, ask for clarification before proceeding. + +Read CLAUDE.md for project context: tech stack, coding standards, architectural patterns, and any estimation guidelines. + +Read relevant design documents from `design/gdd/` if the task relates to a documented feature or system. 
+ +--- + +## Phase 2: Scan Affected Code + +Identify files and modules that would need to change: + +- Assess complexity (size, dependency count, cyclomatic complexity) +- Identify integration points with other systems +- Check for existing test coverage in the affected areas +- Read past sprint data from `production/sprints/` for similar completed tasks and historical velocity + +--- + +## Phase 3: Analyze Complexity Factors + +**Code Complexity:** +- Lines of code in affected files +- Number of dependencies and coupling level +- Whether this touches core/engine code vs leaf/feature code +- Whether existing patterns can be followed or new patterns are needed + +**Scope:** +- Number of systems touched +- New code vs modification of existing code +- Amount of new test coverage required +- Data migration or configuration changes needed + +**Risk:** +- New technology or unfamiliar libraries +- Unclear or ambiguous requirements +- Dependencies on unfinished work +- Cross-system integration complexity +- Performance sensitivity + +--- + +## Phase 4: Generate the Estimate + +```markdown +## Task Estimate: [Task Name] +Generated: [Date] + +### Task Description +[Restate the task clearly in 1-2 sentences] + +### Complexity Assessment + +| Factor | Assessment | Notes | +|--------|-----------|-------| +| Systems affected | [List] | [Core, gameplay, UI, etc.] 
| +| Files likely modified | [Count] | [Key files listed below] | +| New code vs modification | [Ratio] | | +| Integration points | [Count] | [Which systems interact] | +| Test coverage needed | [Low / Medium / High] | | +| Existing patterns available | [Yes / Partial / No] | | + +**Key files likely affected:** +- `[path/to/file1]` -- [what changes here] + +### Effort Estimate + +| Scenario | Days | Assumption | +|----------|------|------------| +| Optimistic | [X] | Everything goes right, no surprises | +| Expected | [Y] | Normal pace, minor issues, one round of review | +| Pessimistic | [Z] | Significant unknowns surface, blocked for a day | + +**Recommended budget: [Y days]** + +### Confidence: [High / Medium / Low] + +[Explain which factors drive the confidence level for this specific task.] + +### Risk Factors + +| Risk | Likelihood | Impact | Mitigation | +|------|-----------|--------|------------| + +### Dependencies + +| Dependency | Status | Impact if Delayed | +|-----------|--------|-------------------| + +### Suggested Breakdown + +| # | Sub-task | Estimate | Notes | +|---|----------|----------|-------| +| 1 | [Research / spike] | [X days] | | +| 2 | [Core implementation] | [X days] | | +| 3 | [Testing and validation] | [X days] | | +| | **Total** | **[Y days]** | | + +### Notes and Assumptions +- [Key assumption that affects the estimate] +- [Any caveats about scope boundaries] +``` + +Output the estimate with a brief summary: recommended budget, confidence level, and the single biggest risk factor. + +This skill is read-only — no files are written. Verdict: **COMPLETE** — estimate generated. + +--- + +## Phase 5: Next Steps + +- If confidence is Low: recommend a time-boxed spike (`/prototype`) before committing. +- If the task is > 10 days: recommend breaking it into smaller stories via `/create-stories`. +- To schedule the task: run `/sprint-plan update` to add it to the next sprint. 
+ +### Guidelines + +- Always give a range (optimistic / expected / pessimistic), never a single number +- The recommended budget should be the expected estimate, not the optimistic one +- Round to half-day increments — estimating in hours implies false precision for tasks longer than a day +- Do not pad estimates silently — call out risk explicitly so the team can decide diff --git a/.omc/skills/gate-check/SKILL.md b/.omc/skills/gate-check/SKILL.md new file mode 100644 index 0000000..9625dbb --- /dev/null +++ b/.omc/skills/gate-check/SKILL.md @@ -0,0 +1,508 @@ +--- +name: gate-check +description: "Validate readiness to advance between development phases. Produces a PASS/CONCERNS/FAIL verdict with specific blockers and required artifacts. Use when user says 'are we ready to move to X', 'can we advance to production', 'check if we can start the next phase', 'pass the gate'." +argument-hint: "[target-phase: systems-design | technical-setup | pre-production | production | polish | release] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write, Task, AskUserQuestion +model: opus +--- + +# Phase Gate Validation + +This skill validates whether the project is ready to advance to the next development +phase. It checks for required artifacts, quality standards, and blockers. + +**Distinct from `/project-stage-detect`**: That skill is diagnostic ("where are we?"). +This skill is prescriptive ("are we ready to advance?" with a formal verdict). + +## Production Stages (7) + +The project progresses through these stages: + +1. **Concept** — Brainstorming, game concept document +2. **Systems Design** — Mapping systems, writing GDDs +3. **Technical Setup** — Engine config, architecture decisions +4. **Pre-Production** — Prototyping, vertical slice validation +5. **Production** — Feature development (Epic/Feature/Task tracking active) +6. **Polish** — Performance, playtesting, bug fixing +7. 
**Release** — Launch prep, certification + +**When a gate passes**, write the new stage name to `production/stage.txt` +(single line, e.g. `Production`). This updates the status line immediately. + +--- + +## 1. Parse Arguments + +**Target phase:** `$ARGUMENTS[0]` (blank = auto-detect current stage, then validate next transition) + +Also resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +Note: in `solo` mode, director spawns (CD-PHASE-GATE, TD-PHASE-GATE, PR-PHASE-GATE, AD-PHASE-GATE) are skipped — gate-check becomes artifact-existence checks only. In `lean` mode, all four directors still run (phase gates are the purpose of lean mode). + +- **With argument**: `/gate-check production` — validate readiness for that specific phase +- **No argument**: Auto-detect current stage using the same heuristics as + `/project-stage-detect`, then **confirm with the user before running**: + + Use `AskUserQuestion`: + - Prompt: "Detected stage: **[current stage]**. Running gate for [Current] → [Next] transition. Is this correct?" + - Options: + - `[A] Yes — run this gate` + - `[B] No — pick a different gate` (if selected, show a second widget listing all gate options: Concept → Systems Design, Systems Design → Technical Setup, Technical Setup → Pre-Production, Pre-Production → Production, Production → Polish, Polish → Release) + + Do not skip this confirmation step when no argument is provided. + +--- + +## 2. 
Phase Gate Definitions + +### Gate: Concept → Systems Design + +**Required Artifacts:** +- [ ] `design/gdd/game-concept.md` exists and has content +- [ ] Game pillars defined (in concept doc or `design/gdd/game-pillars.md`) +- [ ] Visual Identity Anchor section exists in `design/gdd/game-concept.md` (from brainstorm Phase 4 art-director output) + +**Quality Checks:** +- [ ] Game concept has been reviewed (`/design-review` verdict not MAJOR REVISION NEEDED) +- [ ] Core loop is described and understood +- [ ] Target audience is identified +- [ ] Visual Identity Anchor contains a one-line visual rule and at least 2 supporting visual principles + +--- + +### Gate: Systems Design → Technical Setup + +**Required Artifacts:** +- [ ] Systems index exists at `design/gdd/systems-index.md` with at least MVP systems enumerated +- [ ] All MVP-tier GDDs exist in `design/gdd/` and individually pass `/design-review` +- [ ] A cross-GDD review report exists in `design/gdd/` (from `/review-all-gdds`) + +**Quality Checks:** +- [ ] All MVP GDDs pass individual design review (8 required sections, no MAJOR REVISION NEEDED verdict) +- [ ] `/review-all-gdds` verdict is not FAIL (cross-GDD consistency and design theory checks pass) +- [ ] All cross-GDD consistency issues flagged by `/review-all-gdds` are resolved or explicitly accepted +- [ ] System dependencies are mapped in the systems index and are bidirectionally consistent +- [ ] MVP priority tier is defined +- [ ] No stale GDD references flagged (older GDDs updated to reflect decisions made in later GDDs) + +--- + +### Gate: Technical Setup → Pre-Production + +**Required Artifacts:** +- [ ] Engine chosen (CLAUDE.md Technology Stack is not `[CHOOSE]`) +- [ ] Technical preferences configured (`.claude/docs/technical-preferences.md` populated) +- [ ] Art bible exists at `design/art/art-bible.md` with at least Sections 1–4 (Visual Identity Foundation) +- [ ] At least 3 Architecture Decision Records in `docs/architecture/` covering + 
Foundation-layer systems (scene management, event architecture, save/load) +- [ ] Engine reference docs exist in `docs/engine-reference/[engine]/` +- [ ] Test framework initialized: `tests/unit/` and `tests/integration/` directories exist +- [ ] CI/CD test workflow exists at `.github/workflows/tests.yml` (or equivalent) +- [ ] At least one example test file exists to confirm the framework is functional +- [ ] Master architecture document exists at `docs/architecture/architecture.md` +- [ ] Architecture traceability index exists at `docs/architecture/architecture-traceability.md` +- [ ] `/architecture-review` has been run (a review report file exists in `docs/architecture/`) +- [ ] `design/accessibility-requirements.md` exists with accessibility tier committed +- [ ] `design/ux/interaction-patterns.md` exists (pattern library initialized, even if minimal) + +**Quality Checks:** +- [ ] Architecture decisions cover core systems (rendering, input, state management) +- [ ] Technical preferences have naming conventions and performance budgets set +- [ ] Accessibility tier is defined and documented (even "Basic" is acceptable — undefined is not) +- [ ] At least one screen's UX spec started (often the main menu or core HUD is designed during Technical Setup) +- [ ] All ADRs have an **Engine Compatibility section** with engine version stamped +- [ ] All ADRs have a **GDD Requirements Addressed section** with explicit GDD linkage +- [ ] No ADR references APIs listed in `docs/engine-reference/[engine]/deprecated-apis.md` +- [ ] All HIGH RISK engine domains (per VERSION.md) have been explicitly addressed + in the architecture document or flagged as open questions +- [ ] Architecture traceability matrix has **zero Foundation layer gaps** + (all Foundation requirements must have ADR coverage before Pre-Production) + +**ADR Circular Dependency Check**: For all ADRs in `docs/architecture/`, read each ADR's +"ADR Dependencies" / "Depends On" section. 
Build a dependency graph (ADR-A → ADR-B means +A depends on B). If any cycle is detected (e.g. A→B→A, or A→B→C→A): +- Flag as **FAIL**: "Circular ADR dependency: [ADR-X] → [ADR-Y] → [ADR-X]. + Neither can reach Accepted while the cycle exists. Remove one 'Depends On' edge to + break the cycle." + +**Engine Validation** (read `docs/engine-reference/[engine]/VERSION.md` first): +- [ ] ADRs that touch post-cutoff engine APIs are flagged with Knowledge Risk: HIGH/MEDIUM +- [ ] `/architecture-review` engine audit shows no deprecated API usage +- [ ] All ADRs agree on the same engine version (no stale version references) + +--- + +### Gate: Pre-Production → Production + +**Required Artifacts:** +- [ ] At least 1 prototype in `prototypes/` with a README +- [ ] First sprint plan exists in `production/sprints/` +- [ ] Art bible is complete (all 9 sections) and AD-ART-BIBLE sign-off verdict is recorded in `design/art/art-bible.md` +- [ ] Character visual profiles exist for key characters referenced in narrative docs +- [ ] All MVP-tier GDDs from systems index are complete +- [ ] Master architecture document exists at `docs/architecture/architecture.md` +- [ ] At least 3 ADRs covering Foundation-layer decisions exist in `docs/architecture/` +- [ ] Control manifest exists at `docs/architecture/control-manifest.md` + (generated by `/create-control-manifest` from Accepted ADRs) +- [ ] Epics defined in `production/epics/` with at least Foundation and Core + layer epics present (use `/create-epics layer: foundation` and + `/create-epics layer: core` to create them, then `/create-stories [epic-slug]` + for each epic) +- [ ] Vertical Slice build exists and is playable (not just scope-defined) +- [ ] Vertical Slice has been playtested with at least 3 sessions (internal OK) +- [ ] Vertical Slice playtest report exists at `production/playtests/` or equivalent +- [ ] UX specs exist for key screens: main menu, core gameplay HUD (at `design/ux/`), pause menu +- [ ] HUD design document 
exists at `design/ux/hud.md` (if game has in-game HUD) +- [ ] All key screen UX specs have passed `/ux-review` (verdict APPROVED or NEEDS REVISION accepted) + +**Quality Checks:** +- [ ] **Core loop fun is validated** — playtest data confirms the central mechanic is enjoyable, not just functional. Explicitly check the Vertical Slice playtest report. +- [ ] UX specs cover all UI Requirements sections from MVP-tier GDDs +- [ ] Interaction pattern library documents patterns used in key screens +- [ ] Accessibility tier from `design/accessibility-requirements.md` is addressed in all key screen UX specs +- [ ] Sprint plan references real story file paths from `production/epics/` + (not just GDDs — stories must embed GDD req ID + ADR reference) +- [ ] **Vertical Slice is COMPLETE**, not just scoped — the build demonstrates the full core loop end-to-end. At least one complete [start → challenge → resolution] cycle works. +- [ ] Architecture document has no unresolved open questions in Foundation or Core layers +- [ ] All ADRs have Engine Compatibility sections stamped with the engine version +- [ ] All ADRs have ADR Dependencies sections (even if all fields are "None") +- [ ] Manual validation confirms GDDs + architecture + epics are coherent + (run `/review-all-gdds` and `/architecture-review` if not done recently) +- [ ] **Core fantasy is delivered** — at least one playtester independently described an experience that matches the Player Fantasy section of the core system GDDs (without being prompted). 
+ +**Vertical Slice Validation** (FAIL if any item is NO): +- [ ] A human has played through the core loop without developer guidance +- [ ] The game communicates what to do within the first 2 minutes of play +- [ ] No critical "fun blocker" bugs exist in the Vertical Slice build +- [ ] The core mechanic feels good to interact with (this is a subjective check — ask the user) + +> **Note**: If any Vertical Slice Validation item is FAIL, the verdict is automatically FAIL +> regardless of other checks. Advancing without a validated Vertical Slice is the #1 cause of +> production failure in game development (per GDC postmortem data from 155 projects). + +--- + +### Gate: Production → Polish + +**Required Artifacts:** +- [ ] `src/` has active code organized into subsystems +- [ ] All core mechanics from GDD are implemented (cross-reference `design/gdd/` with `src/`) +- [ ] Main gameplay path is playable end-to-end +- [ ] Test files exist in `tests/unit/` and `tests/integration/` covering Logic and Integration stories +- [ ] All Logic stories from this sprint have corresponding unit test files in `tests/unit/` +- [ ] Smoke check has been run with a PASS or PASS WITH WARNINGS verdict — report exists in `production/qa/` +- [ ] QA plan exists in `production/qa/` (generated by `/qa-plan`) covering this sprint or final production sprint +- [ ] QA sign-off report exists in `production/qa/` (generated by `/team-qa`) with verdict APPROVED or APPROVED WITH CONDITIONS +- [ ] At least 3 distinct playtest sessions documented in `production/playtests/` +- [ ] Playtest reports cover: new player experience, mid-game systems, and difficulty curve +- [ ] Fun hypothesis from Game Concept has been explicitly validated or revised + +**Quality Checks:** +- [ ] Tests are passing (run test suite via Bash) +- [ ] No critical/blocker bugs in any bug tracker or known issues +- [ ] Core loop plays as designed (compare to GDD acceptance criteria) +- [ ] Performance is within budget (check 
technical-preferences.md targets) +- [ ] Playtest findings have been reviewed and critical fun issues addressed (not just documented) +- [ ] No "confusion loops" identified — no point in the game where >50% of playtesters got stuck without knowing why +- [ ] Difficulty curve matches the Difficulty Curve design doc (if one exists at `design/difficulty-curve.md`) +- [ ] All implemented screens have corresponding UX specs (no "designed in-code" screens) +- [ ] Interaction pattern library is up-to-date with all patterns used in implementation +- [ ] Accessibility compliance verified against committed tier in `design/accessibility-requirements.md` + +--- + +### Gate: Polish → Release + +**Required Artifacts:** +- [ ] All features from milestone plan are implemented +- [ ] Content is complete (all levels, assets, dialogue referenced in design docs exist) +- [ ] Localization strings are externalized (no hardcoded player-facing text in `src/`) +- [ ] QA test plan exists (`/qa-plan` output in `production/qa/`) +- [ ] QA sign-off report exists (`/team-qa` output — APPROVED or APPROVED WITH CONDITIONS) +- [ ] All Must Have story test evidence is present (Logic/Integration: test files pass; Visual/Feel/UI: sign-off docs in `production/qa/evidence/`) +- [ ] Smoke check passes cleanly (PASS verdict) on the release candidate build +- [ ] No test regressions from previous sprint (test suite passes fully) +- [ ] Balance data has been reviewed (`/balance-check` run) +- [ ] Release checklist completed (`/release-checklist` or `/launch-checklist` run) +- [ ] Store metadata prepared (if applicable) +- [ ] Changelog / patch notes drafted + +**Quality Checks:** +- [ ] Full QA pass signed off by `qa-lead` +- [ ] All tests passing +- [ ] Performance targets met across all target platforms +- [ ] No known critical, high, or medium-severity bugs +- [ ] Accessibility basics covered (remapping, text scaling if applicable) +- [ ] Localization verified for all target languages +- [ ] Legal 
requirements met (EULA, privacy policy, age ratings if applicable) +- [ ] Build compiles and packages cleanly + +--- + +## 3. Run the Gate Check + +**Before running artifact checks**, read `docs/consistency-failures.md` if it exists. +Extract entries whose Domain matches the target phase (e.g., if checking +Systems Design → Technical Setup, pull entries in Economy, Combat, or any GDD domain; +if checking Technical Setup → Pre-Production, pull entries in Architecture, Engine). +Carry these as context — recurring conflict patterns in the target domain warrant +increased scrutiny on those specific checks. + +For each item in the target gate: + +### Artifact Checks +- Use `Glob` and `Read` to verify files exist and have meaningful content +- Don't just check existence — verify the file has real content (not just a template header) +- For code checks, verify directory structure and file counts + +**Systems Design → Technical Setup gate — cross-GDD review check**: +Use `Glob('design/gdd/gdd-cross-review-*.md')` to find the `/review-all-gdds` report. +If no file matches, mark the "cross-GDD review report exists" artifact as **FAIL** and +surface it prominently: "No `/review-all-gdds` report found in `design/gdd/`. Run +`/review-all-gdds` before advancing to Technical Setup." +If a file is found, read it and check the verdict line: a FAIL verdict means the +cross-GDD consistency check failed and must be resolved before advancing. 
+ +### Quality Checks +- For test checks: Run the test suite via `Bash` if a test runner is configured +- For design review checks: `Read` the GDD and check for the 8 required sections +- For performance checks: `Read` technical-preferences.md and compare against any + profiling data in `tests/performance/` or recent `/perf-profile` output +- For localization checks: `Grep` for hardcoded strings in `src/` + +### Cross-Reference Checks +- Compare `design/gdd/` documents against `src/` implementations +- Check that every system referenced in architecture docs has corresponding code +- Verify sprint plans reference real work items + +--- + +## 4. Collaborative Assessment + +For items that can't be automatically verified, **ask the user**: + +- "I can't automatically verify that the core loop plays well. Has it been playtested?" +- "No playtest report found. Has informal testing been done?" +- "Performance profiling data isn't available. Would you like to run `/perf-profile`?" + +**Never assume PASS for unverifiable items.** Mark them as MANUAL CHECK NEEDED. + +--- + +## 4b. Director Panel Assessment + +Before generating the final verdict, spawn all four directors as **parallel subagents** via Task using the parallel gate protocol from `.claude/docs/director-gates.md`. Issue all four Task calls simultaneously — do not wait for one before starting the next. + +**Spawn in parallel:** + +1. **`creative-director`** — gate **CD-PHASE-GATE** (`.claude/docs/director-gates.md`) +2. **`technical-director`** — gate **TD-PHASE-GATE** (`.claude/docs/director-gates.md`) +3. **`producer`** — gate **PR-PHASE-GATE** (`.claude/docs/director-gates.md`) +4. **`art-director`** — gate **AD-PHASE-GATE** (`.claude/docs/director-gates.md`) + +Pass to each: target phase name, list of artifacts present, and the context fields listed in that gate's definition. 
+ +**Collect all four responses, then present the Director Panel summary:** + +``` +## Director Panel Assessment + +Creative Director: [READY / CONCERNS / NOT READY] + [feedback] + +Technical Director: [READY / CONCERNS / NOT READY] + [feedback] + +Producer: [READY / CONCERNS / NOT READY] + [feedback] + +Art Director: [READY / CONCERNS / NOT READY] + [feedback] +``` + +**Apply to the verdict:** +- Any director returns NOT READY → verdict is minimum FAIL (user may override with explicit acknowledgement) +- Any director returns CONCERNS → verdict is minimum CONCERNS +- All four READY → eligible for PASS (still subject to artifact and quality checks from Section 3) + +--- + +## 5. Output the Verdict + +``` +## Gate Check: [Current Phase] → [Target Phase] + +**Date**: [date] +**Checked by**: gate-check skill + +### Required Artifacts: [X/Y present] +- [x] design/gdd/game-concept.md — exists, 2.4KB +- [ ] docs/architecture/ — MISSING (no ADRs found) +- [x] production/sprints/ — exists, 1 sprint plan + +### Quality Checks: [X/Y passing] +- [x] GDD has 8/8 required sections +- [ ] Tests — FAILED (3 failures in tests/unit/) +- [?] Core loop playtested — MANUAL CHECK NEEDED + +### Blockers +1. **No Architecture Decision Records** — Run `/architecture-decision` to create one + covering core system architecture before entering production. +2. **3 test failures** — Fix failing tests in tests/unit/ before advancing. + +### Recommendations +- [Priority actions to resolve blockers] +- [Optional improvements that aren't blocking] + +### Verdict: [PASS / CONCERNS / FAIL] +- **PASS**: All required artifacts present, all quality checks passing +- **CONCERNS**: Minor gaps exist but can be addressed during the next phase +- **FAIL**: Critical blockers must be resolved before advancing +``` + +--- + +## 5a. Chain-of-Verification + +After drafting the verdict in Phase 5, challenge it before finalising. 
+ +**Step 1 — Generate 5 challenge questions** designed to disprove the verdict: + +For a **PASS** draft: +- "Which quality checks did I verify by actually reading a file, vs. inferring they passed?" +- "Are there MANUAL CHECK NEEDED items I marked PASS without user confirmation?" +- "Did I confirm all listed artifacts have real content, not just empty headers?" +- "Could any blocker I dismissed as minor actually prevent the phase from succeeding?" +- "Which single check am I least confident in, and why?" + +For a **CONCERNS** draft: +- "Could any listed CONCERN be elevated to a blocker given the project's current state?" +- "Is the concern resolvable within the next phase, or does it compound over time?" +- "Did I soften any FAIL condition into a CONCERN to avoid a harder verdict?" +- "Are there artifacts I didn't check that could reveal additional blockers?" +- "Do all the CONCERNS together create a blocking problem even if each is minor alone?" + +For a **FAIL** draft: +- "Have I accurately separated hard blockers from strong recommendations?" +- "Are there any PASS items I was too lenient about?" +- "Am I missing any additional blockers the user should know about?" +- "Can I provide a minimal path to PASS — the specific 3 things that must change?" +- "Is the fail condition resolvable, or does it indicate a deeper design problem?" + +**Step 2 — Answer each question** independently. +Do NOT reference the draft verdict text — re-check specific files or ask the user. + +**Step 3 — Revise if needed:** +- If any answer reveals a missed blocker → upgrade verdict (PASS→CONCERNS or CONCERNS→FAIL) +- If any answer reveals an over-stated blocker → downgrade only if citing specific evidence +- If answers are consistent → confirm verdict unchanged + +**Step 4 — Note the verification** in the final report output: +`Chain-of-Verification: [N] questions checked — verdict [unchanged | revised from X to Y]` + +--- + +## 6. 
Update Stage on PASS + +When the verdict is **PASS** and the user confirms they want to advance: + +1. Write the new stage name to `production/stage.txt` (single line, no trailing newline) +2. This immediately updates the status line for all future sessions + +Example: if passing the "Pre-Production → Production" gate: +```bash +echo -n "Production" > production/stage.txt +``` + +**Always ask before writing**: "Gate passed. May I update `production/stage.txt` to 'Production'?" + +--- + +## 7. Closing Next-Step Widget + +After the verdict is presented and any stage.txt update is complete, close with a structured next-step prompt using `AskUserQuestion`. + +**Tailor the options to the gate that just ran:** + +For **systems-design PASS**: +``` +Gate passed. What would you like to do next? +[A] Run /create-architecture — produce your master architecture blueprint and ADR work plan (recommended next step) +[B] Design more GDDs first — return here when all MVP systems are complete +[C] Stop here for this session +``` + +> **Note for systems-design PASS**: `/create-architecture` is the required next step before writing any ADRs. It produces the master architecture document and a prioritized list of ADRs to write. Running `/architecture-decision` without this step means writing ADRs without a blueprint — skip it at your own risk. + +For **technical-setup PASS**: +``` +Gate passed. What would you like to do next? +[A] Start Pre-Production — begin prototyping the Vertical Slice +[B] Write more ADRs first — run /architecture-decision [next-system] +[C] Stop here for this session +``` + +For all other gates, offer the two most logical next steps for that phase plus "Stop here". + +--- + +## 8. 
Follow-Up Actions + +Based on the verdict, suggest specific next steps: + +- **No art bible?** → `/art-bible` to create the visual identity specification +- **Art bible exists but no asset specs?** → `/asset-spec system:[name]` to generate per-asset visual specs and generation prompts from approved GDDs +- **No game concept?** → `/brainstorm` to create one +- **No systems index?** → `/map-systems` to decompose the concept into systems +- **Missing design docs?** → `/reverse-document` or delegate to `game-designer` +- **Small design change needed?** → `/quick-design` for changes under ~4 hours (bypasses full GDD pipeline) +- **No UX specs?** → `/ux-design [screen name]` to author specs, or `/team-ui [feature]` for full pipeline +- **UX specs not reviewed?** → `/ux-review [file]` or `/ux-review all` to validate +- **No accessibility requirements doc?** → Use `AskUserQuestion` to offer to create it now: + - Prompt: "The gate requires `design/accessibility-requirements.md`. Shall I create it from the template?" + - Options: `Create it now — I'll choose an accessibility tier`, `I'll create it myself`, `Skip for now` + - If "Create it now": use a second `AskUserQuestion` to ask for the tier: + - Prompt: "Which accessibility tier fits this project?" + - Options: `Basic — remapping + subtitles only (lowest effort)`, `Standard — Basic + colorblind modes + scalable UI`, `Comprehensive — Standard + motor accessibility + full settings menu`, `Exemplary — Comprehensive + external audit + full customization` + - Then write `design/accessibility-requirements.md` using the template at `.claude/docs/templates/accessibility-requirements.md`, filling in the chosen tier. Confirm: "May I write `design/accessibility-requirements.md`?" 
+- **No interaction pattern library?** → `/ux-design patterns` to initialize it +- **GDDs not cross-reviewed?** → `/review-all-gdds` (run after all MVP GDDs are individually approved) +- **Cross-GDD consistency issues?** → fix flagged GDDs, then re-run `/review-all-gdds` +- **No test framework?** → `/test-setup` to scaffold the framework for your engine +- **No QA plan for current sprint?** → `/qa-plan sprint` to generate one before implementation begins +- **Missing ADRs?** → `/architecture-decision` for individual decisions +- **No master architecture doc?** → `/create-architecture` for the full blueprint +- **ADRs missing engine compatibility sections?** → Re-run `/architecture-decision` + or manually add Engine Compatibility sections to existing ADRs +- **Missing control manifest?** → `/create-control-manifest` (requires Accepted ADRs) +- **Missing epics?** → `/create-epics layer: foundation` then `/create-epics layer: core` (requires control manifest) +- **Missing stories for an epic?** → `/create-stories [epic-slug]` (run after each epic is created) +- **Stories not implementation-ready?** → `/story-readiness` to validate stories before developers pick them up +- **Tests failing?** → delegate to `lead-programmer` or `qa-tester` +- **No playtest data?** → `/playtest-report` +- **Less than 3 playtest sessions?** → Run more playtests before advancing. Use `/playtest-report` to structure findings. +- **No Difficulty Curve doc?** → Consider creating one at `design/difficulty-curve.md` before polish +- **No player journey document?** → create `design/player-journey.md` using the player journey template +- **Need a quick sprint check?** → `/sprint-status` for current sprint progress snapshot +- **Performance unknown?** → `/perf-profile` +- **Not localized?** → `/localize` +- **Ready for release?** → `/launch-checklist` + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle: + +1. 
 **Scan first**: Check all artifacts and quality gates +2. **Ask about unknowns**: Don't assume PASS for things you can't verify +3. **Present findings**: Show the full checklist with status +4. **User decides**: The verdict is a recommendation — the user makes the final call +5. **Get approval**: "May I write this gate check report to production/gate-checks/?" + +**Never** block a user from advancing — the verdict is advisory. Document the risks +and let the user decide whether to proceed despite concerns. diff --git a/.omc/skills/help/SKILL.md b/.omc/skills/help/SKILL.md new file mode 100644 index 0000000..12757c8 --- /dev/null +++ b/.omc/skills/help/SKILL.md @@ -0,0 +1,228 @@ +--- +name: help +description: "Analyzes what has been done and the user's query, then offers advice on what to do next. Use if user says 'what should I do next', 'what do I do now', 'I'm stuck', or 'I don't know what to do'" +argument-hint: "[optional: what you just finished, e.g. 'finished design-review' or 'stuck on ADRs']" +user-invocable: true +allowed-tools: Read, Glob, Grep +context: | + !echo "=== Live Project State ===" && echo "Stage: $(cat production/stage.txt 2>/dev/null | tr -d '[:space:]' || echo 'not set')" && echo "Latest sprint: $(ls -t production/sprints/*.md 2>/dev/null | head -1 || echo 'none')" && echo "Session state: $(head -5 production/session-state/active.md 2>/dev/null || echo 'none')" +model: haiku +--- + +# Studio Help — What Do I Do Next? + +This skill is read-only — it reports findings but writes no files. + +This skill figures out exactly where you are in the game development pipeline and +tells you what comes next. It is **lightweight** — not a full audit. For a full +gap analysis, use `/project-stage-detect`. + +--- + +## Step 1: Read the Catalog + +Read `.claude/docs/workflow-catalog.yaml`. This is the authoritative list of all +phases, their steps (in order), whether each step is required or optional, and +the artifact globs that indicate completion. 
+ +--- + +## Step 1b: Find Skills Not in the Catalog + +After reading the catalog, Glob `.claude/skills/*/SKILL.md` to get the full list +of installed skills. For each file, extract the `name:` field from its frontmatter. + +Compare against the `command:` values in the catalog. Any skill whose name does +not appear as a catalog command is an **uncataloged skill** — still usable but not +part of the phase-gated workflow. + +Collect these for the output in Step 7 — show them as a footer block: + +``` +### Also installed (not in workflow) +- `/skill-name` — [description from SKILL.md frontmatter] +- `/skill-name` — [description] +``` + +Only show this block if at least one uncataloged skill exists. Limit to the 10 +most relevant based on the user's current phase (QA skills in production, team +skills in production/polish, etc.). + +--- + +## Step 2: Determine Current Phase + +Check in this order: + +1. **Read `production/stage.txt`** — if it exists and has content, this is the + authoritative phase name. Map it to a catalog phase key: + - "Concept" → `concept` + - "Systems Design" → `systems-design` + - "Technical Setup" → `technical-setup` + - "Pre-Production" → `pre-production` + - "Production" → `production` + - "Polish" → `polish` + - "Release" → `release` + +2. **If stage.txt is missing**, infer phase from artifacts (most-advanced match wins): + - `src/` has 10+ source files → `production` + - `production/stories/*.md` exists → `pre-production` + - `docs/architecture/adr-*.md` exists → `technical-setup` + - `design/gdd/systems-index.md` exists → `systems-design` + - `design/gdd/game-concept.md` exists → `concept` + - Nothing → `concept` (fresh project) + +--- + +## Step 3: Read Session Context + +Read `production/session-state/active.md` if it exists. 
Extract: +- What was most recently worked on +- Any in-progress tasks or open questions +- Current epic/feature/task from STATUS block (if present) + +This tells you what the user just finished or is stuck on — use it to personalize +the output. + +--- + +## Step 4: Check Step Completion for the Current Phase + +For each step in the current phase (from the catalog): + +### Artifact-based checks + +If the step has `artifact.glob`: +- Use Glob to check if files matching the pattern exist +- If `min_count` is specified, verify at least that many files match +- If `artifact.pattern` is specified, use Grep to verify the pattern exists in the matched file +- **Complete** = artifact condition is met +- **Incomplete** = artifact is missing or pattern not found + +If the step has `artifact.note` (no glob): +- Mark as **MANUAL** — cannot auto-detect, will ask user + +If the step has no `artifact` field: +- Mark as **UNKNOWN** — completion not trackable (e.g. repeatable implementation work) + +### Special case: production phase — read `sprint-status.yaml` + +When the current phase is `production`, check for `production/sprint-status.yaml` +before doing any glob-based story checks. If it exists, read it directly: + +- Stories with `status: in-progress` → surface as "currently active" +- Stories with `status: ready-for-dev` → surface as "next up" +- Stories with `status: done` → count as complete +- Stories with `status: blocked` → surface as blocker with the `blocker` field + +This gives precise per-story status without markdown scanning. Skip the glob +artifact check for the `implement` and `story-done` steps — the YAML is authoritative. + +### Special case: `repeatable: true` (non-production) + +For repeatable steps outside production (e.g. "System GDDs"), the artifact +check tells you whether *any* work has been done, not whether it's finished. +Label these differently — show what's been detected, then note it may be ongoing. 
+ +--- + +## Step 5: Find Position and Identify Next Steps + +From the completion data, determine: + +1. **Last confirmed complete step** — the furthest completed required step +2. **Current blocker** — the first incomplete *required* step (this is what the + user must do next) +3. **Optional opportunities** — incomplete *optional* steps that can be done + before or alongside the blocker +4. **Upcoming required steps** — required steps after the current blocker + (show as "coming up" so user can plan ahead) + +If the user provided an argument (e.g. "just finished design-review"), use that +to advance past the step they named even if the artifact check is ambiguous. + +--- + +## Step 6: Check for In-Progress Work + +If `active.md` shows an active task or epic: +- Surface it prominently at the top: "It looks like you were working on [X]" +- Suggest continuing it or confirm if it's done + +--- + +## Step 7: Present Output + +Keep it **short and direct**. This is a quick orientation, not a report. + +``` +## Where You Are: [Phase Label] + +**In progress:** [from active.md, if any] + +### ✓ Done +- [completed step name] +- [completed step name] + +### → Next up (REQUIRED) +**[Step name]** — [description] +Command: `[/command]` + +### ~ Also available (OPTIONAL) +- **[Step name]** — [description] → `/command` +- **[Step name]** — [description] → `/command` + +### Coming up after that +- [Next required step name] (`/command`) +- [Next required step name] (`/command`) + +--- +Approaching **[next phase]** gate → run `/gate-check` when ready. +``` + +**Formatting rules:** +- `✓` for confirmed complete +- `→` for the current required next step (only one — the first blocker) +- `~` for optional steps available now +- Show commands inline as backtick code +- If a step has no command (e.g. "Implement Stories"), explain what to do instead of showing a slash command +- For MANUAL steps, ask the user: "I can't tell if [step] is done — has it been completed?" 
+ +Verdict: **COMPLETE** — next steps identified. + +--- + +## Step 8: Gate Warning (if close) + +After the current phase's steps, check if the user is likely approaching a gate: +- If all required steps in the current phase are complete (or nearly complete), + add: "You're close to the **[Current] → [Next]** gate. Run `/gate-check` when ready." +- If multiple required steps remain, skip the gate warning — it's not relevant yet. + +--- + +## Step 9: Escalation Paths + +After the recommendations, if the user seems stuck or confused, add: + +``` +--- +Need more detail? +- `/project-stage-detect` — full gap analysis with all missing artifacts listed +- `/gate-check` — formal readiness check for your next phase +- `/start` — re-orient from scratch +``` + +Only show this if the user's input suggested confusion (e.g. "I don't know", "stuck", +"lost", "not sure"). Don't show it for simple "what's next?" queries. + +--- + +## Collaborative Protocol + +- **Never auto-run the next skill.** Recommend it, let the user invoke it. +- **Ask about MANUAL steps** rather than assuming complete or incomplete. +- **Match the user's tone** — if they sound stressed ("I'm totally lost"), be + reassuring and give one action, not a list of six. +- **One primary recommendation** — the user should leave knowing exactly one thing + to do next. Optional steps and "coming up" are secondary context. diff --git a/.omc/skills/hotfix/SKILL.md b/.omc/skills/hotfix/SKILL.md new file mode 100644 index 0000000..2efd09d --- /dev/null +++ b/.omc/skills/hotfix/SKILL.md @@ -0,0 +1,154 @@ +--- +name: hotfix +description: "Emergency fix workflow that bypasses normal sprint processes with a full audit trail. Creates hotfix branch, tracks approvals, and ensures the fix is backported correctly." 
+argument-hint: "[bug-id or description]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task +--- + +> **Explicit invocation only**: This skill should only run when the user explicitly requests it with `/hotfix`. Do not auto-invoke based on context matching. + +## Phase 1: Assess Severity + +Read the bug description or ID. Determine severity: + +- **S1 (Critical)**: Game unplayable, data loss, security vulnerability — hotfix immediately +- **S2 (Major)**: Significant feature broken, workaround exists — hotfix within 24 hours +- If severity is S3 or lower, recommend using the normal bug fix workflow instead and stop. + +--- + +## Phase 2: Create Hotfix Record + +Draft the hotfix record: + +```markdown +## Hotfix: [Short Description] +Date: [Date] +Severity: [S1/S2] +Reporter: [Who found it] +Status: IN PROGRESS + +### Problem +[Clear description of what is broken and the player impact] + +### Root Cause +[To be filled during investigation] + +### Fix +[To be filled during implementation] + +### Testing +[What was tested and how] + +### Approvals +- [ ] Fix reviewed by lead-programmer +- [ ] Regression test passed (qa-tester) +- [ ] Release approved (producer) + +### Rollback Plan +[How to revert if the fix causes new issues] +``` + +Ask: "May I write this to `production/hotfixes/hotfix-[date]-[short-name].md`?" + +If yes, write the file, creating the directory if needed. + +--- + +## Phase 3: Create Hotfix Branch + +If git is initialized, create the hotfix branch: + +``` +git checkout -b hotfix/[short-name] [release-tag-or-main] +``` + +--- + +## Phase 4: Investigate and Implement + +Focus on the minimal change that resolves the issue. Do NOT refactor, clean up, or add features alongside the hotfix. + +Validate the fix by running targeted tests for the affected system. Check for regressions in adjacent systems. + +Update the hotfix record with root cause, fix details, and test results. 
+ +--- + +## Phase 5: Collect Approvals + +Use the Task tool to request sign-off in parallel: + +- `subagent_type: lead-programmer` — Review the fix for correctness and side effects +- `subagent_type: qa-tester` — Run targeted regression tests on the affected system +- `subagent_type: producer` — Approve deployment timing and communication plan + +All three must return APPROVE before proceeding. If any returns CONCERNS or REJECT, do not deploy — surface the issue and resolve it first. + +--- + +## Phase 5b: QA Re-Entry Gate + +After approvals, determine the QA scope required before deploying the hotfix. Spawn `qa-lead` via Task with: +- The hotfix description and affected system +- The regression test results from Phase 5 +- A list of all systems that touch the changed files (use Grep to find callers) + +Ask qa-lead: **Is a full smoke check sufficient, or does this fix require a targeted team-qa pass?** + +Apply the verdict: +- **Smoke check sufficient** — run `/smoke-check` against the hotfix build. If PASS, proceed to Phase 6. +- **Targeted QA pass required** — run `/team-qa [affected-system]` scoped to the changed system only. If QA returns APPROVED or APPROVED WITH CONDITIONS, proceed to Phase 6. +- **Full QA required** — S1 fixes that touch core systems may require a full `/team-qa sprint`. This delays deployment but prevents a bad patch. + +Do not skip this gate. A hotfix that breaks something else is worse than the original bug. + +--- + +## Phase 6: Update Bug Status and Deploy + +Update the original bug file if one exists: + +```markdown +## Fix Record +**Fixed in**: hotfix/[branch-name] — [commit hash or description] +**Fixed date**: [date] +**Status**: Fixed — Pending Verification +``` + +Set `**Status**: Fixed — Pending Verification` in the bug file header. 
+ +Output a deployment summary: + +``` +## Hotfix Ready to Deploy: [short-name] + +**Severity**: [S1/S2] +**Root cause**: [one line] +**Fix**: [one line] +**QA gate**: [Smoke check PASS / Team-QA APPROVED] +**Approvals**: lead-programmer ✓ / qa-tester ✓ / producer ✓ +**Rollback plan**: [from Phase 2 record] + +Merge to: release branch AND development branch +Next: /bug-report verify [BUG-ID] after deploy to confirm resolution +``` + +### Rules +- Hotfixes must be the MINIMUM change to fix the issue — no cleanup, no refactoring +- Every hotfix must have a rollback plan documented before deployment +- Hotfix branches merge to BOTH the release branch AND the development branch +- All hotfixes require a post-incident review within 48 hours +- If the fix is complex enough to need more than 4 hours, escalate to `technical-director` + +--- + +## Phase 7: Post-Deploy Verification + +After deploying, run `/bug-report verify [BUG-ID]` to confirm the fix resolved the issue in the deployed build. + +If VERIFIED FIXED: run `/bug-report close [BUG-ID]` to formally close it. +If STILL PRESENT: the hotfix failed — immediately re-open, assess rollback, and escalate. + +Schedule a post-incident review within 48 hours using `/retrospective hotfix`. diff --git a/.omc/skills/launch-checklist/SKILL.md b/.omc/skills/launch-checklist/SKILL.md new file mode 100644 index 0000000..46dc237 --- /dev/null +++ b/.omc/skills/launch-checklist/SKILL.md @@ -0,0 +1,239 @@ +--- +name: launch-checklist +description: "Complete launch readiness validation covering every department: code, content, store, marketing, community, infrastructure, legal, and go/no-go sign-offs." +argument-hint: "[launch-date or 'dry-run']" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +--- + +> **Explicit invocation only**: This skill should only run when the user explicitly requests it with `/launch-checklist`. Do not auto-invoke based on context matching. 
+ +## Phase 1: Parse Arguments + +Read the argument for the launch date or `dry-run` mode. Dry-run mode generates the checklist without creating sign-off entries or writing files. + +--- + +## Phase 2: Gather Project Context + +- Read `CLAUDE.md` for tech stack, target platforms, and team structure +- Read the latest milestone in `production/milestones/` +- Read any existing release checklist in `production/releases/` +- Read the content calendar in `design/live-ops/content-calendar.md` if it exists + +--- + +## Phase 3: Scan Codebase Health + +- Count `TODO`, `FIXME`, `HACK` comments and their locations +- Check for any `console.log`, `print()`, or debug output left in production code +- Check for placeholder assets (search for `placeholder`, `temp_`, `WIP_`) +- Check for hardcoded test/dev values (localhost, test credentials, debug flags) + +--- + +## Phase 4: Generate the Launch Checklist + +```markdown +# Launch Checklist: [Game Title] +Target Launch: [Date or DRY RUN] +Generated: [Date] + +--- + +## 1. 
Code Readiness + +### Build Health +- [ ] Clean build on all target platforms +- [ ] Zero compiler warnings +- [ ] All unit tests passing +- [ ] All integration tests passing +- [ ] Performance benchmarks within targets +- [ ] No memory leaks (verified via extended soak test) +- [ ] Build size within platform limits +- [ ] Build version correctly set and tagged in source control + +### Code Quality +- [ ] TODO count: [N] (zero required for launch, or documented exceptions) +- [ ] FIXME count: [N] (zero required) +- [ ] HACK count: [N] (each must have documented justification) +- [ ] No debug output in production code +- [ ] No hardcoded dev/test values +- [ ] All feature flags set to production values +- [ ] Error handling covers all critical paths +- [ ] Crash reporting integrated and verified + +### Security +- [ ] No exposed API keys or credentials in source +- [ ] Save data encrypted +- [ ] Network communication secured (TLS/DTLS) +- [ ] Anti-cheat measures active (if multiplayer) +- [ ] Input validation on all server endpoints (if multiplayer) +- [ ] Privacy policy compliance verified + +--- + +## 2. 
Content Readiness + +### Assets +- [ ] All placeholder art replaced with final assets +- [ ] All placeholder audio replaced with final audio +- [ ] Audio mix finalized and approved by audio director +- [ ] All VFX polished and performance-verified +- [ ] No missing or broken asset references +- [ ] Asset naming conventions enforced + +### Text and Localization +- [ ] All player-facing text proofread +- [ ] No hardcoded strings (all externalized for localization) +- [ ] All supported languages translated and verified +- [ ] Text fits UI in all languages (text fitting pass complete) +- [ ] Font coverage verified for all supported languages +- [ ] Credits complete, accurate, and up to date + +### Game Content +- [ ] All levels/maps playable from start to finish +- [ ] Tutorial flow complete and tested with new players +- [ ] All achievements/trophies implemented and tested +- [ ] Save/load works correctly for all game states +- [ ] Difficulty settings balanced and tested +- [ ] End-game/credits sequence complete + +--- + +## 3. 
Quality Assurance + +### Testing +- [ ] Full regression test suite passed +- [ ] Zero S1 (Critical) bugs open +- [ ] Zero S2 (Major) bugs open (or documented exceptions) +- [ ] Soak test passed (8+ hours continuous play) +- [ ] Multiplayer stress test passed (if applicable) +- [ ] All critical user paths tested on every platform +- [ ] Edge cases tested (full storage, no network, suspend/resume) + +### Platform Certification +- [ ] PC: Steam/Epic/GOG SDK requirements met +- [ ] Console: TRC/TCR/Lotcheck submission prepared +- [ ] Mobile: App Store/Play Store guidelines compliant +- [ ] Accessibility: minimum standards met (remapping, text scaling, colorblind) +- [ ] Age ratings obtained (ESRB, PEGI, regional) + +### Performance +- [ ] Target FPS met on minimum spec hardware +- [ ] Load times within budget on all platforms +- [ ] Memory usage within budget on all platforms +- [ ] Network bandwidth within targets (if multiplayer) +- [ ] No frame hitches in critical gameplay moments + +--- + +## 4. Store and Distribution + +### Store Pages +- [ ] Store page copy finalized and proofread +- [ ] Screenshots current and per-platform resolution +- [ ] Trailers current and approved +- [ ] Key art and capsule images finalized +- [ ] System requirements accurate (PC) +- [ ] Pricing configured for all regions +- [ ] Pre-purchase/wishlist campaigns active (if applicable) + +### Legal +- [ ] EULA finalized and approved by legal +- [ ] Privacy policy published and linked +- [ ] Third-party license attributions complete +- [ ] Music/audio licensing verified +- [ ] Trademark/IP clearance confirmed +- [ ] GDPR/CCPA compliance verified (data collection, consent, deletion) + +--- + +## 5. 
Infrastructure + +### Servers (if multiplayer/online) +- [ ] Production servers provisioned and load-tested +- [ ] Auto-scaling configured and tested +- [ ] Database backups configured +- [ ] CDN configured for content delivery +- [ ] DDoS protection active +- [ ] Monitoring and alerting configured + +### Analytics and Monitoring +- [ ] Analytics pipeline verified and receiving data +- [ ] Crash reporting active and dashboard accessible +- [ ] Server monitoring dashboards live +- [ ] Key metrics tracked: DAU, session length, retention, crashes +- [ ] Alerts configured for critical thresholds + +--- + +## 6. Community and Marketing + +### Community Readiness +- [ ] Community guidelines published +- [ ] Moderation team briefed and tools ready +- [ ] Discord/forum/social channels set up +- [ ] FAQ and known issues page prepared +- [ ] Support email/ticketing system active + +### Marketing +- [ ] Launch trailer published +- [ ] Press/influencer review keys distributed +- [ ] Social media launch posts scheduled +- [ ] Launch day blog post/dev update drafted +- [ ] Patch notes for launch version published + +--- + +## 7. 
Operations + +### Team Readiness +- [ ] On-call schedule set for first 72 hours post-launch +- [ ] Incident response playbook reviewed by team +- [ ] Rollback plan documented and tested +- [ ] Hotfix pipeline tested (can ship emergency fix within 4 hours) +- [ ] Communication plan for launch issues (who posts, where, how fast) + +### Day-One Plan +- [ ] Day-one patch prepared (if needed) +- [ ] Server unlock/go-live procedure documented +- [ ] Launch monitoring dashboard bookmarked by all leads +- [ ] War room/channel established for launch day + +--- + +## Go / No-Go Decision + +**Overall Status**: [READY / NOT READY / CONDITIONAL] + +### Blocking Items +[List any items that must be resolved before launch] + +### Conditional Items +[List items that have documented workarounds or accepted risk] + +### Sign-Offs Required +- [ ] Creative Director — Content and experience quality +- [ ] Technical Director — Technical health and stability +- [ ] QA Lead — Quality and test coverage +- [ ] Producer — Schedule and overall readiness +- [ ] Release Manager — Build and deployment readiness +``` + +--- + +## Phase 5: Save Checklist + +Present the completed checklist and summary to the user (total items, blocking items count, conditional items count, departments with incomplete sections). + +If not in dry-run mode, ask: "May I write this to `production/releases/launch-checklist-[date].md`?" + +If yes, write the file, creating directories as needed. + +--- + +## Phase 6: Next Steps + +- Run `/gate-check` to get a formal PASS/CONCERNS/FAIL verdict before launch. +- Coordinate sign-offs via `/team-release`. 
diff --git a/.omc/skills/localize/SKILL.md b/.omc/skills/localize/SKILL.md new file mode 100644 index 0000000..8b241bf --- /dev/null +++ b/.omc/skills/localize/SKILL.md @@ -0,0 +1,440 @@ +--- +name: localize +description: "Full localization pipeline: scan for hardcoded strings, extract and manage string tables, validate translations, generate translator briefings, run cultural/sensitivity review, manage VO localization, test RTL/platform requirements, enforce string freeze, and report coverage." +argument-hint: "[scan|extract|validate|status|brief|cultural-review|vo-pipeline|rtl-check|freeze|qa]" +user-invocable: true +agent: localization-lead +allowed-tools: Read, Glob, Grep, Write, Bash, Task, AskUserQuestion +--- + +# Localization Pipeline + +Localization is not just translation — it is the full process of making a game +feel native in every language and region. Poor localization breaks immersion, +confuses players, and blocks platform certification. This skill covers the +complete pipeline from string extraction through cultural review, VO recording, +RTL layout testing, and localization QA sign-off. + +**Modes:** +- `scan` — Find hardcoded strings and localization anti-patterns (read-only) +- `extract` — Extract strings and generate translation-ready tables +- `validate` — Check translations for completeness, placeholders, and length +- `status` — Coverage matrix across all locales +- `brief` — Generate translator context briefing document for an external team +- `cultural-review` — Flag culturally sensitive content, symbols, colours, idioms +- `vo-pipeline` — Manage voice-over localization: scripts, recording specs, integration +- `rtl-check` — Validate RTL language layout, mirroring, and font support +- `freeze` — Enforce string freeze; lock source strings before translation begins +- `qa` — Run the full localization QA cycle before release + +If no subcommand is provided, output usage and stop. Verdict: **FAIL** — missing required subcommand. 
+ +--- + +## Phase 2A: Scan Mode + +Search `src/` for hardcoded user-facing strings: + +- String literals in UI code not wrapped in a localization function (`tr()`, `Tr()`, `NSLocalizedString`, `GetText`, etc.) +- Concatenated strings that should be parameterized +- Strings with positional placeholders (`%s`, `%d`) instead of named ones (`{playerName}`) +- Format strings that mix locale-sensitive data (numbers, dates, currencies) without locale-aware formatting + +Search for localization anti-patterns: + +- Date/time formatting not using locale-aware functions +- Number formatting without locale awareness (`1,000` vs `1.000`) +- Text embedded in images or textures (flag asset files in `assets/`) +- Strings that assume left-to-right text direction (positional layout, string assembly order) +- Gender/plurality assumptions baked into string logic (must use plural forms or gender tokens) +- Hardcoded punctuation (e.g. `"You won!"` — exclamation styles vary by locale) + +Report all findings with file paths and line numbers. This mode is read-only — no files are written. + +--- + +## Phase 2B: Extract Mode + +- Scan all source files for localized string references +- Compare against the existing string table in `assets/data/strings/` +- Generate new entries for strings not yet keyed +- Suggest key names following the convention: `[category].[subcategory].[description]` + - Example: `ui.hud.health_label`, `dialogue.npc.merchant.greeting`, `menu.main.play_button` +- Each new entry must include a `context` field — a translator comment explaining: + - Where it appears (which screen, which scene) + - Maximum character length + - Any placeholder meaning (`{playerName}` = the player's chosen display name) + - Gender/plurality context if applicable + +Output a diff of new strings to add to the string table. + +Present the diff to the user. Ask: "May I write these new entries to `assets/data/strings/strings-en.json`?" 
+ +If yes, write only the diff (new entries), not a full replacement. Verdict: **COMPLETE** — strings extracted and written. + +--- + +## Phase 2C: Validate Mode + +Read all string table files in `assets/data/strings/`. For each locale, check: + +- **Completeness** — key exists in source (en) but no translation for this locale +- **Placeholder mismatches** — source has `{name}` but translation omits it or adds extras +- **String length violations** — translation exceeds the character limit recorded in the source `context` field +- **Plural form count** — locale requires N plural forms; translation provides fewer +- **Orphaned keys** — translation exists but nothing in `src/` references the key +- **Stale translations** — source string changed after translation was written (flag for re-translation) +- **Encoding** — non-ASCII characters present and font atlas supports them (flag if uncertain) + +Report validation results grouped by locale and severity. This mode is read-only — no files are written. + +--- + +## Phase 2D: Status Mode + +- Count total localizable strings in the source table +- Per locale: count translated, untranslated, stale (source changed since translation) +- Generate a coverage matrix: + +```markdown +## Localization Status +Generated: [Date] +String freeze: [Active / Not yet called / Lifted] + +| Locale | Total | Translated | Missing | Stale | Coverage | +|--------|-------|-----------|---------|-------|----------| +| en (source) | [N] | [N] | 0 | 0 | 100% | +| [locale] | [N] | [N] | [N] | [N] | [X]% | + +### Issues +- [N] hardcoded strings found in source code (run /localize scan) +- [N] strings exceeding character limits +- [N] placeholder mismatches +- [N] orphaned keys +- [N] strings added after freeze was called (freeze violations) +``` + +This mode is read-only — no files are written. + +--- + +## Phase 2E: Brief Mode + +Generate a translator context briefing document. 
This document is sent to the +external translation team or localisation vendor alongside the string table export. + +Read: +- `design/gdd/` — extract game genre, tone, setting, character names +- `assets/data/strings/strings-en.json` — the source string table +- Any existing lore or narrative documents in `design/narrative/` + +Generate `production/localization/translator-brief-[locale]-[date].md`: + +```markdown +# Translator Brief — [Game Name] — [Locale] + +## Game Overview +[2-3 paragraph summary of the game, genre, tone, and audience] + +## Tone and Voice +- **Overall tone**: [e.g., "Darkly comic, not slapstick — think Terry Pratchett, not Looney Tunes"] +- **Player address**: [e.g., "Second person, informal. Never formal 'vous' — always 'tu' for French"] +- **Profanity policy**: [e.g., "Mild — PG-13 equivalent. Match intensity to source, do not soften or escalate"] +- **Humour**: [e.g., "Wordplay exists — if a pun cannot translate, invent an equivalent local joke; do not translate literally"] + +## Character Glossary +| Name | Role | Personality | Notes | +|------|------|-------------|-------| +| [Name] | [Role] | [Personality] | [Do not translate / transliterate as X] | + +## World Glossary +| Term | Meaning | Notes | +|------|---------|-------| +| [Term] | [What it means] | [Keep in English / translate as X] | + +## Do Not Translate List +The following must appear verbatim in all locales: +- [Game name] +- [UI terms that match in-engine labels] +- [Brand or trademark names] + +## Placeholder Reference +| Placeholder | What it represents | Example | +|-------------|-------------------|---------| +| `{playerName}` | Player's chosen display name | "Shadowblade" | +| `{count}` | Integer quantity | "3" | + +## Character Limits +Tight UI fields with hard limits are marked in the string table `context` field. +Where no limit is stated, target ±30% of the English length as a guideline. 
+ +## Contact +Direct questions to: [placeholder for user/team contact] +Delivery format: JSON, same schema as strings-en.json +``` + +Ask: "May I write this translator brief to `production/localization/translator-brief-[locale]-[date].md`?" + +--- + +## Phase 2F: Cultural Review Mode + +Spawn `localization-lead` via Task. Ask them to audit the following for cultural sensitivity across the target locales (read from `assets/data/strings/` and `assets/`): + +### Content Areas to Review + +**Symbols and gestures** +- Thumbs up, OK hand, peace sign — meanings vary by region +- Religious or spiritual symbols in art, UI, or audio +- National flags, map representations, disputed territories + +**Colours** +- White (mourning in some Asian cultures), green (political associations in some regions), red (luck vs danger) +- Alert/warning colours that conflict with cultural associations + +**Numbers** +- 4 (death in Japanese/Chinese), 13, 666 — flag use in UI (room numbers, item counts, prices) + +**Humour and idioms** +- Idioms that translate as offensive in other locales +- Toilet/bodily humour that is inappropriate in some markets (notably Japan, Germany, Middle East) +- Dark humour around topics that are culturally sensitive in specific regions + +**Violence and content ratings** +- Content that would require ratings changes in DE (Germany), AU (Australia), CN (China), or AE (UAE) +- Blood colour, gore level, drug references — flag all for region-specific asset variants if needed + +**Names and representations** +- Character names that are offensive, profane, or carry negative meaning in target locales +- Stereotyped representation of nationalities, religions, or ethnic groups + +Present findings as a table: + +| Finding | Locale(s) Affected | Severity | Recommended Action | +|---------|--------------------|----------|--------------------| +| [Description] | [Locale] | [BLOCKING / ADVISORY / NOTE] | [Change / Flag for review / Accept] | + +BLOCKING = must fix before 
shipping that locale. ADVISORY = recommend change. NOTE = informational only. + +Ask: "May I write this cultural review report to `production/localization/cultural-review-[date].md`?" + +--- + +## Phase 2G: VO Pipeline Mode + +Manage the voice-over localization process. Determine the sub-task from the argument: + +- `vo-pipeline scan` — identify all dialogue lines that require VO recording +- `vo-pipeline script` — generate recording scripts with director notes +- `vo-pipeline validate` — check that all recorded VO files are present and correctly named +- `vo-pipeline integrate` — verify VO files are correctly referenced in code/assets + +### VO Pipeline: Scan + +Read `assets/data/strings/` and `design/narrative/`. Identify: +- All dialogue lines (keys matching `dialogue.*`) with source text +- Lines already recorded (audio file exists in `assets/audio/vo/`) +- Lines not yet recorded + +Output a recording manifest: + +``` +## VO Recording Manifest — [Date] + +| Key | Character | Source Line | Status | +|-----|-----------|-------------|--------| +| dialogue.npc.merchant.greeting | Merchant | "Welcome, traveller." | Recorded | +| dialogue.npc.merchant.haggle | Merchant | "That's my final offer." | Needs recording | +``` + +### VO Pipeline: Script + +Generate a recording script document for each character, grouped by scene. Include: + +- Character name and brief personality note +- Full dialogue line with pronunciation guide for unusual proper nouns +- Emotion/direction note for each line (`[Warm, welcoming]`, `[Annoyed, clipped]`) +- Any lines that are responses in a conversation (provide context: "Player just said X") + +Ask: "May I write the VO recording scripts to `production/localization/vo-scripts-[locale]-[date].md`?" + +### VO Pipeline: Validate + +Glob `assets/audio/vo/[locale]/` for all `.wav`/`.ogg` files. Cross-reference against the VO manifest. 
Report: +- Missing files (line in script, no audio file) +- Extra files (audio file exists, no matching string key) +- Naming convention violations + +### VO Pipeline: Integrate + +Grep `src/` for VO audio references. Verify each referenced path exists in `assets/audio/vo/[locale]/`. Report broken references. + +--- + +## Phase 2H: RTL Check Mode + +Right-to-left languages (Arabic, Hebrew, Persian, Urdu) require layout mirroring beyond +just translating text. This mode validates the implementation. + +Read `.claude/docs/technical-preferences.md` to determine the engine. Then check: + +**Layout mirroring** +- Is RTL layout enabled in the engine? (Godot: `Control.layout_direction`, Unity: `RTL Support` package, Unreal: text direction flags) +- Are all UI containers set to auto-mirror, or are positions hardcoded? +- Do progress bars, health bars, and directional indicators mirror correctly? + +**Text rendering** +- Are fonts loaded that support Arabic/Hebrew character sets? +- Is Arabic text rendered with correct ligatures (connected script)? +- Are numbers displayed as Eastern Arabic numerals where required? + +**String assembly** +- Are there any string concatenations that assume left-to-right reading order? +- Do `{placeholder}` positions in sentences work correctly when sentence structure is reversed? + +**Asset review** +- Are there UI icons with directional arrows or asymmetric designs that need mirrored variants? +- Do any text-in-image assets exist that require RTL versions? + +Grep patterns to check: +- Engine-specific RTL flags in scene/prefab files +- Any `HBoxContainer`, `LinearLayout`, `HorizontalBox` nodes — verify layout_direction settings +- String concatenation with `+` near dialogue or UI code + +Report findings. Flag BLOCKING issues (content unreadable without fix) vs ADVISORY (cosmetic improvements). + +Ask: "May I write this RTL check report to `production/localization/rtl-check-[date].md`?" 
+ +--- + +## Phase 2I: Freeze Mode + +String freeze locks the source (English) string table so that translations can proceed +without the source changing under the translators. + +### freeze call + +Check current freeze status in `production/localization/freeze-status.md` (if it exists). + +If already frozen: +> "String freeze is currently ACTIVE (called [date]). [N] strings have been added or modified since freeze. These are freeze violations — they require re-translation or an approved freeze lift." + +If not frozen, present the pre-freeze checklist: + +``` +Pre-Freeze Checklist +[ ] All planned UI screens are implemented +[ ] All dialogue lines are final (no further narrative revisions planned) +[ ] All system strings (error messages, tutorial text) are complete +[ ] /localize scan shows zero hardcoded strings +[ ] /localize validate shows no placeholder mismatches in source (en) +[ ] Marketing strings (store description, achievements) are final +``` + +Use `AskUserQuestion`: +- Prompt: "Are all items above confirmed? Calling string freeze locks the source table." +- Options: `[A] Yes — call string freeze now` / `[B] No — I still have strings to add` + +If [A]: Write `production/localization/freeze-status.md`: + +```markdown +# String Freeze Status + +**Status**: ACTIVE +**Called**: [date] +**Called by**: [user] +**Total strings at freeze**: [N] + +## Post-Freeze Changes +[Any strings added or modified after freeze are listed here automatically by /localize extract] +``` + +### freeze lift + +If argument includes `lift`: update `freeze-status.md` Status to `LIFTED`, record the reason and date. Warn: "Lifting the freeze requires re-translation of all modified strings. Notify the translation team." + +### freeze check (auto-integrated into extract) + +When `extract` mode finds new or modified strings and `freeze-status.md` shows Status: ACTIVE — append the new keys to `## Post-Freeze Changes` and warn: +> "⚠️ String freeze is active. 
[N] new/modified strings have been added. These are freeze violations. Notify your localization vendor before proceeding." + +--- + +## Phase 2J: QA Mode + +Localization QA is a dedicated pass that runs after translations are delivered but +before any locale ships. This is not the same as `/localize validate` (which checks completeness) +— this is a structured playthrough-based quality check. + +Spawn `localization-lead` via Task with: +- The target locale(s) to QA +- The list of all screens/flows in the game (from `design/gdd/` or `/content-audit` output) +- The current `/localize validate` report +- The cultural review report (if it exists) + +Ask the localization-lead to produce a QA plan covering: + +1. **Functional string check** — every string displays in-game without truncation, placeholder errors, or encoding corruption +2. **UI overflow check** — translated strings that exceed UI bounds (even if within character limits, some languages expand) +3. **Contextual accuracy** — a sample of 10% of strings reviewed in-game for translation accuracy and natural phrasing +4. **Cultural review items** — verify all BLOCKING items from the cultural review are resolved +5. **VO sync check** — if VO exists, verify lip sync or subtitle timing is acceptable after translation +6. 
**Platform cert requirements** — check platform-specific localization requirements (age ratings text, legal notices, ESRB/PEGI/CERO text) + +Output a QA verdict per locale: + +``` +## Localization QA Verdict — [Locale] + +**Status**: PASS / PASS WITH CONDITIONS / FAIL +**Reviewed by**: localization-lead +**Date**: [date] + +### Findings +| ID | Area | Description | Severity | Status | +|----|------|-------------|----------|--------| +| LOC-001 | UI Overflow | "Settings" button text overflows on [Screen] | BLOCKING | Open | +| LOC-002 | Translation | [Key] translation is literal — sounds unnatural | ADVISORY | Open | + +### Conditions (if PASS WITH CONDITIONS) +- [Condition 1 — must resolve before ship] + +### Sign-Off +[ ] All BLOCKING findings resolved +[ ] Producer approves shipping [Locale] +``` + +Ask: "May I write this localization QA report to `production/localization/loc-qa-[locale]-[date].md`?" + +**Gate integration**: The Polish → Release gate requires a PASS or PASS WITH CONDITIONS verdict for every locale being shipped. A FAIL blocks release for that locale only — other locales may still proceed if their QA passes. 
+ +--- + +## Phase 3: Rules and Next Steps + +### Rules +- English (en) is always the source locale +- Every string table entry must include a `context` field with translator notes, character limits, and placeholder meaning +- Never modify translation files directly — generate diffs for review +- Character limits must be defined per-UI-element and enforced in validate mode +- String freeze must be called before sending strings to translators — never translate a moving target +- RTL support must be designed in from the start — retrofitting RTL layout is expensive +- Cultural review is required for any locale where the game will be sold commercially +- VO scripts must include director notes — raw dialogue lines produce flat recordings + +### Recommended Workflow + +``` +/localize scan → find hardcoded strings +/localize extract → build string table +/localize freeze → lock source before sending to translators +/localize brief → generate translator briefing document +[Send to translators] +/localize validate → check returned translations +/localize cultural-review → flag culturally sensitive content +/localize rtl-check → if shipping Arabic / Hebrew / Persian +/localize vo-pipeline → if shipping dubbed VO +/localize qa → full localization QA pass +``` + +After `qa` returns PASS for all shipping locales, include the QA report path when running `/gate-check release`. diff --git a/.omc/skills/map-systems/SKILL.md b/.omc/skills/map-systems/SKILL.md new file mode 100644 index 0000000..bcbefc1 --- /dev/null +++ b/.omc/skills/map-systems/SKILL.md @@ -0,0 +1,363 @@ +--- +name: map-systems +description: "Decompose a game concept into individual systems, map dependencies, prioritize design order, and create the systems index." 
+argument-hint: "[next | system-name] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, AskUserQuestion, TodoWrite, Task +--- + +When this skill is invoked: + +## Parse Arguments + +Two modes: + +- **No argument**: `/map-systems` — Run the full decomposition workflow (Phases 1-5) + to create or update the systems index. +- **`next`**: `/map-systems next` — Pick the highest-priority undesigned system + from the index and hand off to `/design-system` (Phase 6). + +Also resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +--- + +## Phase 1: Read Concept (Required Context) + +Read the game concept and any existing design work. This provides the raw material +for systems decomposition. + +**Required:** +- Read `design/gdd/game-concept.md` — **fail with a clear message if missing**: + > "No game concept found at `design/gdd/game-concept.md`. Run `/brainstorm` first + > to create one, then come back to decompose it into systems." + +**Optional (read if they exist):** +- Read `design/gdd/game-pillars.md` — pillars constrain priority and scope +- Read `design/gdd/systems-index.md` — if exists, **resume** from where it left off + (update, don't recreate from scratch) +- Glob `design/gdd/*.md` — check which system GDDs already exist + +**If the systems index already exists:** +- Read it and present current status to the user +- Use `AskUserQuestion` to ask: + "The systems index already exists with [N] systems ([M] designed, [K] not started). + What would you like to do?" + - Options: "Update the index with new systems", "Design the next undesigned system", + "Review and revise priorities" + +--- + +## Phase 2: Systems Enumeration (Collaborative) + +Extract and identify all systems the game needs. 
This is the creative core of the +skill — it requires human judgment because concept docs rarely enumerate every +system explicitly. + +### Step 2a: Extract Explicit Systems + +Scan the game concept for directly mentioned systems and mechanics: +- Core Mechanics section (most explicit) +- Core Loop section (implies what systems drive each loop tier) +- Technical Considerations section (networking, procedural generation, etc.) +- MVP Definition section (required features = required systems) + +### Step 2b: Identify Implicit Systems + +For each explicit system, identify the **hidden systems** it implies. Games always +need more systems than the concept doc mentions. Use this inference pattern: + +- "Inventory" implies: item database, equipment slots, weight/capacity rules, + inventory UI, item serialization for save/load +- "Combat" implies: damage calculation, health system, hit detection, status effects, + enemy AI, combat UI (health bars, damage numbers), death/respawn +- "Open world" implies: streaming/chunking, LOD system, fast travel, map/minimap, + point of interest tracking, world state persistence +- "Multiplayer" implies: networking layer, lobby/matchmaking, state synchronization, + anti-cheat, network UI (ping, player list) +- "Crafting" implies: recipe database, ingredient gathering, crafting UI, + success/failure mechanics, recipe discovery/learning +- "Dialogue" implies: dialogue tree system, dialogue UI, choice tracking, NPC + state management, localization hooks +- "Progression" implies: XP system, level-up mechanics, skill tree, unlock + tracking, progression UI, progression save data + +Explain in conversation text why each implicit system is needed (with examples). + +### Step 2c: User Review + +Present the enumeration organized by category. 
For each system, show: +- Name +- Category +- Brief description (1 sentence) +- Whether it was explicit (from concept) or implicit (inferred) + +Then use `AskUserQuestion` to capture feedback: +- "Are there systems missing from this list?" +- "Should any of these be combined or split?" +- "Are there systems listed that this game does NOT need?" + +Iterate until the user approves the enumeration. + +--- + +## Phase 3: Dependency Mapping (Collaborative) + +For each system, determine what it depends on. A system "depends on" another if +it cannot function without that other system existing first. + +### Step 3a: Map Dependencies + +For each system, list its dependencies. Use these dependency heuristics: +- **Input/output dependencies**: System A produces data System B needs +- **Structural dependencies**: System A provides the framework System B plugs into +- **UI dependencies**: Every gameplay system has a corresponding UI system that + depends on it (but UI is designed after the gameplay system) + +### Step 3b: Sort by Dependency Order + +Arrange systems into layers: +1. **Foundation**: Systems with zero dependencies (designed and built first) +2. **Core**: Systems depending only on Foundation systems +3. **Feature**: Systems depending on Core systems +4. **Presentation**: UI and feedback systems that wrap gameplay systems +5. **Polish**: Meta-systems, tutorials, analytics, accessibility + +### Step 3c: Detect Circular Dependencies + +Check for cycles in the dependency graph. If found: +- Highlight them to the user +- Propose resolutions (interface abstraction, simultaneous design, breaking the + cycle by defining a contract between the two systems) + +### Step 3d: Present to User + +Show the dependency map as a layered list. 
Highlight: +- Any circular dependencies +- Any "bottleneck" systems (many others depend on them — these are high-risk) +- Any systems with no dependents (leaf nodes — lower risk, can be designed late) + +Use `AskUserQuestion` to ask: "Does this dependency ordering look right? Any +dependencies I'm missing or that should be removed?" + +**Review mode check** — apply before spawning TD-SYSTEM-BOUNDARY: +- `solo` → skip. Note: "TD-SYSTEM-BOUNDARY skipped — Solo mode." Proceed to priority assignment. +- `lean` → skip (not a PHASE-GATE). Note: "TD-SYSTEM-BOUNDARY skipped — Lean mode." Proceed to priority assignment. +- `full` → spawn as normal. + +**After dependency mapping is approved, spawn `technical-director` via Task using gate TD-SYSTEM-BOUNDARY (`.claude/docs/director-gates.md`) before proceeding to priority assignment.** + +Pass: the dependency map summary, layer assignments, bottleneck systems list, any circular dependency resolutions. + +Present the assessment. If REJECT, revise the system boundaries with the user before moving to priority assignment. If CONCERNS, note them inline in the systems index and continue. + +--- + +## Phase 4: Priority Assignment (Collaborative) + +Assign each system to a priority tier based on what milestone it's needed for. + +### Step 4a: Auto-Assign Based on Concept + +Use these heuristics for initial assignment: +- **MVP**: Systems mentioned in the concept's "Required for MVP" section, plus their + Foundation-layer dependencies +- **Vertical Slice**: Systems needed for a complete experience in one area +- **Alpha**: All remaining gameplay systems +- **Full Vision**: Polish, meta, and nice-to-have systems + +### Step 4b: User Review + +Present the priority assignments in a table. For each tier, explain why systems +were placed there. + +Use `AskUserQuestion` to ask: "Do these priority assignments match your vision? +Which systems should be higher or lower priority?" 
+ +Explain reasoning in conversation: "I placed [system] in MVP because the core loop +requires it — without [system], the 30-second loop can't function." + +**"Why" column guidance**: When explaining why each system was placed in a priority tier, mix technical necessity with player-experience reasoning. Do not use purely technical justifications like "Combat needs damage math" — connect to player experience where relevant. Examples of good "Why" entries: +- "Required for the core loop — without it, placement decisions have no consequence (Pillar 2: Placement is the Puzzle)" +- "Ballista's punch-through identity is established here — this stat definition is what makes it feel different from Archer" +- "Foundation for all economy decisions — players must understand upgrade costs to make meaningful placement choices" + +Pure technical necessity ("X depends on Y") is insufficient alone when the system directly shapes player experience. + +**Review mode check** — apply before spawning PR-SCOPE: +- `solo` → skip. Note: "PR-SCOPE skipped — Solo mode." Proceed to writing the systems index. +- `lean` → skip (not a PHASE-GATE). Note: "PR-SCOPE skipped — Lean mode." Proceed to writing the systems index. +- `full` → spawn as normal. + +**After priorities are approved, spawn `producer` via Task using gate PR-SCOPE (`.claude/docs/director-gates.md`) before writing the index.** + +Pass: total system count per milestone tier, estimated implementation volume per tier (system count × average complexity), team size, stated project timeline. + +Present the assessment. If UNREALISTIC, offer to revise priority tier assignments before writing the index. If CONCERNS, note them and continue. + +### Step 4c: Determine Design Order + +Combine dependency sort + priority tier to produce the final design order: +1. MVP Foundation systems first +2. MVP Core systems second +3. MVP Feature systems third +4. Vertical Slice Foundation/Core systems +5. 
...and so on + +This is the order the team should write GDDs in. + +--- + +## Phase 5: Create Systems Index (Write) + +### Step 5a: Draft the Document + +Using the template at `.claude/docs/templates/systems-index.md`, populate the +systems index with all data from Phases 2-4: +- Fill the enumeration table +- Fill the dependency map +- Fill the recommended design order +- Fill the high-risk systems +- Fill progress tracker (all systems "Not Started" initially, unless GDDs already exist) + +### Step 5b: Approval + +Present a summary of the document: +- Total systems count by category +- MVP system count +- First 3 systems in the design order +- Any high-risk items + +Ask: "May I write the systems index to `design/gdd/systems-index.md`?" + +Wait for approval. Write the file only after "yes." + +**Review mode check** — apply before spawning CD-SYSTEMS: +- `solo` → skip. Note: "CD-SYSTEMS skipped — Solo mode." Proceed to Phase 7 next steps. +- `lean` → skip (not a PHASE-GATE). Note: "CD-SYSTEMS skipped — Lean mode." Proceed to Phase 7 next steps. +- `full` → spawn as normal. + +**After the systems index is written, spawn `creative-director` via Task using gate CD-SYSTEMS (`.claude/docs/director-gates.md`).** + +Pass: systems index path, game pillars and core fantasy (from `design/gdd/game-concept.md`), MVP priority tier system list. + +Present the assessment. If REJECT, revise the system set with the user before GDD authoring begins. If CONCERNS, record them in the systems index as a `> **Creative Director Note**` at the top of the relevant tier section. + +### Step 5c: Update Session State + +After writing, create `production/session-state/active.md` if it does not exist, then update it with: +- Task: Systems decomposition +- Status: Systems index created +- File: design/gdd/systems-index.md +- Next: Design individual system GDDs + +**Verdict: COMPLETE** — systems index written to `design/gdd/systems-index.md`. 
+If the user declined: **Verdict: BLOCKED** — user did not approve the write. + +--- + +## Phase 6: Design Individual Systems (Handoff to /design-system) + +This phase is entered when: +- The user says "yes" to designing systems after creating the index +- The user invokes `/map-systems [system-name]` +- The user invokes `/map-systems next` + +### Step 6a: Select the System + +- If a system name was provided, find it in the systems index +- If `next` was used, pick the highest-priority undesigned system (by design order) +- If the user just finished the index, ask: + "Would you like to start designing individual systems now? The first system in + the design order is [name]. Or would you prefer to stop here and come back later?" + +Use `AskUserQuestion` for: "Start designing [system-name] now, pick a different +system, or stop here?" + +### Step 6b: Hand Off to /design-system + +Once a system is selected, invoke the `/design-system [system-name]` skill. + +The `/design-system` skill handles the full GDD authoring process: +- Gathers context from game concept, systems index, and dependency GDDs +- Creates a file skeleton immediately +- Walks through all 8 required sections one at a time (collaborative, incremental) +- Cross-references existing docs to prevent contradictions +- Routes to specialist agents for domain expertise +- Writes each section to file as soon as it's approved +- Runs `/design-review` when complete +- Updates the systems index + +**Do not duplicate the /design-system workflow here.** This skill owns the systems +*index*; `/design-system` owns individual system *GDDs*. + +### Step 6c: Loop or Stop + +After `/design-system` completes, use `AskUserQuestion`: +- "Continue to the next system ([next system name])?" +- "Pick a different system?" +- "Stop here for this session?" + +If continuing, return to Step 6a. 
+ +--- + +## Phase 7: Suggest Next Steps + +After the systems index is created (or after designing some systems), present next actions using `AskUserQuestion`: + +- "Systems index is written. What would you like to do next?" + - [A] Start designing GDDs — run `/design-system [first-system-in-order]` + - [B] Ask a director to review the index first — ask `creative-director` or `technical-director` to validate the system set before committing to 10+ GDD sessions + - [C] Stop here for this session + +**The director review option ([B]) is worth highlighting**: having a Creative Director or Technical Director review the completed systems index before starting GDD authoring catches scope issues, missing systems, and boundary problems before they're locked in across many documents. It is optional but recommended for new projects. + +After any individual GDD is completed: +- "Run `/design-review design/gdd/[system].md` in a fresh session to validate quality" +- "Run `/gate-check systems-design` when all MVP GDDs are complete" + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle at every phase: + +1. **Question -> Options -> Decision -> Draft -> Approval** at every step +2. **AskUserQuestion** at every decision point (Explain -> Capture pattern): + - Phase 2: "Missing systems? Combine or split?" + - Phase 3: "Dependency ordering correct?" + - Phase 4: "Priority assignments match your vision?" + - Phase 5: "May I write the systems index?" + - Phase 6: "Start designing, pick different, or stop?" then hand off to `/design-system` +3. **"May I write to [filepath]?"** before every file write +4. **Incremental writing**: Update the systems index after each system is designed +5. **Handoff**: Individual GDD authoring is owned by `/design-system`, which handles + incremental section writing, cross-referencing, design review, and index updates +6. 
**Session state updates**: Write to `production/session-state/active.md` after + each milestone (index created, system designed, priorities changed) + +**Never** auto-generate the full systems list and write it without review. +**Never** start designing a system without user confirmation. +**Always** show the enumeration, dependencies, and priorities for user validation. + +## Context Window Awareness + +If context reaches or exceeds 70% at any point, append this notice: + +> **Context is approaching the limit (≥70%).** The systems index is saved to +> `design/gdd/systems-index.md`. Open a fresh Claude Code session to continue +> designing individual GDDs — run `/map-systems next` to pick up where you left off. + +--- + +## Recommended Next Steps + +- Run `/design-system [first-system-in-order]` to author the first GDD (use design order from the index) +- Run `/map-systems next` to always pick the highest-priority undesigned system automatically +- Run `/design-review design/gdd/[system].md` in a fresh session after each GDD is authored +- Run `/gate-check pre-production` when all MVP GDDs are authored and reviewed diff --git a/.omc/skills/milestone-review/SKILL.md b/.omc/skills/milestone-review/SKILL.md new file mode 100644 index 0000000..06a9191 --- /dev/null +++ b/.omc/skills/milestone-review/SKILL.md @@ -0,0 +1,139 @@ +--- +name: milestone-review +description: "Generates a comprehensive milestone progress review including feature completeness, quality metrics, risk assessment, and go/no-go recommendation. Use at milestone checkpoints or when evaluating readiness for a milestone deadline." +argument-hint: "[milestone-name|current] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task, AskUserQuestion +--- + +## Phase 0: Parse Arguments + +Extract the milestone name (`current` or a specific name) and resolve the review mode (once, store for all gate spawns this run): +1. 
If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +--- + +## Phase 1: Load Milestone Data + +Read the milestone definition from `production/milestones/`. If the argument is `current`, use the most recently modified milestone file. + +Read all sprint reports for sprints within this milestone from `production/sprints/`. + +--- + +## Phase 2: Scan Codebase Health + +- Scan for `TODO`, `FIXME`, `HACK` markers that indicate incomplete work +- Check the risk register at `production/risk-register/` + +--- + +## Phase 3: Generate the Milestone Review + +```markdown +# Milestone Review: [Milestone Name] + +## Overview +- **Target Date**: [Date] +- **Current Date**: [Today] +- **Days Remaining**: [N] +- **Sprints Completed**: [X/Y] + +## Feature Completeness + +### Fully Complete +| Feature | Acceptance Criteria | Test Status | +|---------|-------------------|-------------| + +### Partially Complete +| Feature | % Done | Remaining Work | Risk to Milestone | +|---------|--------|---------------|------------------| + +### Not Started +| Feature | Priority | Can Cut? | Impact of Cutting | +|---------|----------|----------|------------------| + +## Quality Metrics +- **Open S1 Bugs**: [N] -- [List] +- **Open S2 Bugs**: [N] +- **Open S3 Bugs**: [N] +- **Test Coverage**: [X%] +- **Performance**: [Within budget? 
Details] + +## Code Health +- **TODO count**: [N across codebase] +- **FIXME count**: [N] +- **HACK count**: [N] +- **Technical debt items**: [List critical ones] + +## Risk Assessment +| Risk | Status | Impact if Realized | Mitigation Status | +|------|--------|-------------------|------------------| + +## Velocity Analysis +- **Planned vs Completed** (across all sprints): [X/Y tasks = Z%] +- **Trend**: [Improving / Stable / Declining] +- **Adjusted estimate for remaining work**: [Days needed at current velocity] + +## Scope Recommendations +### Protect (Must ship with milestone) +- [Feature and why] + +### At Risk (May need to cut or simplify) +- [Feature and risk] + +### Cut Candidates (Can defer without compromising milestone) +- [Feature and impact of cutting] + +## Go/No-Go Assessment + +**Recommendation**: [GO / CONDITIONAL GO / NO-GO] + +**Conditions** (if conditional): +- [Condition 1 that must be met] +- [Condition 2 that must be met] + +**Rationale**: [Explanation of the recommendation] + +## Action Items +| # | Action | Owner | Deadline | +|---|--------|-------|----------| +``` + +--- + +## Phase 3b: Producer Risk Assessment + +**Review mode check** — apply before spawning PR-MILESTONE: +- `solo` → skip. Note: "PR-MILESTONE skipped — Solo mode." Present the Go/No-Go section without a producer verdict. +- `lean` → skip (not a PHASE-GATE). Note: "PR-MILESTONE skipped — Lean mode." Present the Go/No-Go section without a producer verdict. +- `full` → spawn as normal. + +Before generating the Go/No-Go recommendation, spawn `producer` via Task using gate **PR-MILESTONE** (`.claude/docs/director-gates.md`). + +Pass: milestone name and target date, current completion percentage, blocked story count, velocity data from sprint reports (if available), list of cut candidates. + +Present the producer's assessment inline within the Go/No-Go section. 
The producer's verdict (ON TRACK / AT RISK / OFF TRACK) informs the overall recommendation — do not issue a GO against an OFF TRACK producer verdict without explicit user acknowledgement. + +--- + +## Phase 4: Save Review + +Present the review to the user. + +Ask: "May I write this to `production/milestones/[milestone-name]-review.md`?" + +If yes, write the file, creating the directory if needed. Verdict: **COMPLETE** — milestone review saved. + +If no, stop here. Verdict: **BLOCKED** — user declined write. + +--- + +## Phase 5: Next Steps + +- Run `/gate-check` for a formal phase gate verdict if this milestone marks a development phase boundary. +- Run `/sprint-plan` to adjust the next sprint based on the scope recommendations above. diff --git a/.omc/skills/onboard/SKILL.md b/.omc/skills/onboard/SKILL.md new file mode 100644 index 0000000..19b58aa --- /dev/null +++ b/.omc/skills/onboard/SKILL.md @@ -0,0 +1,96 @@ +--- +name: onboard +description: "Generates a contextual onboarding document for a new contributor or agent joining the project. Summarizes project state, architecture, conventions, and current priorities relevant to the specified role or area." +argument-hint: "[role|area]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +model: haiku +--- + +## Phase 1: Load Project Context + +Read CLAUDE.md for project overview and standards. + +Read the relevant agent definition from `.claude/agents/` if a specific role is specified. + +--- + +## Phase 2: Scan Relevant Area + +- For programmers: scan `src/` for architecture, patterns, key files +- For designers: scan `design/` for existing design documents +- For narrative: scan `design/narrative/` for world-building and story docs +- For QA: scan `tests/` for existing test coverage +- For production: scan `production/` for current sprint and milestone + +Read recent changes (git log if available) to understand current momentum. 
+ +--- + +## Phase 3: Generate Onboarding Document + +```markdown +# Onboarding: [Role/Area] + +## Project Summary +[2-3 sentence summary of what this game is and its current state] + +## Your Role +[What this role does on this project, key responsibilities, who you report to] + +## Project Architecture +[Relevant architectural overview for this role] + +### Key Directories +| Directory | Contents | Your Interaction | +|-----------|----------|-----------------| + +### Key Files +| File | Purpose | Read Priority | +|------|---------|--------------| + +## Current Standards and Conventions +[Summary of conventions relevant to this role from CLAUDE.md and agent definition] + +## Current State of Your Area +[What has been built, what is in progress, what is planned next] + +## Current Sprint Context +[What the team is working on now and what is expected of this role] + +## Key Dependencies +[What other roles/systems this role interacts with most] + +## Common Pitfalls +[Things that trip up new contributors in this area] + +## First Tasks +[Suggested first tasks to get oriented and productive] + +1. [Read these documents first] +2. [Review this code/content] +3. [Start with this small task] + +## Questions to Ask +[Questions the new contributor should ask to get fully oriented] +``` + +--- + +## Phase 4: Save Document + +Present the onboarding document to the user. + +Ask: "May I write this to `production/onboarding/onboard-[role]-[date].md`?" + +If yes, write the file, creating the directory if needed. + +--- + +## Phase 5: Next Steps + +Verdict: **COMPLETE** — onboarding document generated. + +- Share the onboarding doc with the new contributor before their first session. +- Run `/sprint-status` to show the new contributor current progress. +- Run `/help` if the contributor needs guidance on what to work on next. 
diff --git a/.omc/skills/patch-notes/SKILL.md b/.omc/skills/patch-notes/SKILL.md new file mode 100644 index 0000000..c16fda8 --- /dev/null +++ b/.omc/skills/patch-notes/SKILL.md @@ -0,0 +1,186 @@ +--- +name: patch-notes +description: "Generate player-facing patch notes from git history, sprint data, and internal changelogs. Translates developer language into clear, engaging player communication." +argument-hint: "[version] [--style brief|detailed|full]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash +model: haiku +agent: community-manager +--- + +## Phase 1: Parse Arguments + +- `version`: the release version to generate notes for (e.g., `1.2.0`) +- `--style`: output style — `brief` (bullet points), `detailed` (with context), `full` (with developer commentary). Default: `detailed`. + +If no version is provided, ask the user before proceeding. + +--- + +## Phase 2: Gather Change Data + +- Read the internal changelog at `production/releases/[version]/changelog.md` if it exists +- Also check `docs/CHANGELOG.md` for the relevant version entry +- Run `git log` between the previous release tag and current tag/HEAD as a fallback +- Read sprint retrospectives in `production/sprints/` for context +- Read any balance change documents in `design/balance/` +- Read bug fix records from QA if available + +**If no changelog data is available** (neither `production/releases/[version]/changelog.md` +nor a `docs/CHANGELOG.md` entry for this version exists, and git log is empty or unavailable): + +> "No changelog data found for [version]. Run `/changelog [version]` first to generate the +> internal changelog, then re-run `/patch-notes [version]`." + +Verdict: **BLOCKED** — stop here without generating notes. + +--- + +## Phase 2b: Detect Tone Guide and Template + +**Tone guide detection** — before drafting notes, check for writing style guidance: + +1. Check `.claude/docs/technical-preferences.md` for any "tone", "voice", or "style" + fields or sections. +2. 
Check `docs/PATCH-NOTES-STYLE.md` if it exists. +3. Check `design/community/tone-guide.md` if it exists. +4. If any source contains tone/voice/style instructions, extract them and apply + them to the language and framing of the generated notes. +5. If no tone guidance is found anywhere, default to: + player-friendly, non-technical language; enthusiastic but not hyperbolic; + focus on what the player experiences, not what the developer changed. + +**Template detection** — check whether a patch notes template exists: + +1. Glob for `docs/patch-notes-template.md` and `.claude/docs/templates/patch-notes-template.md`. +2. If found at either location, read it and use it as the output structure for Phase 4 + instead of the built-in style templates (Brief / Detailed / Full). Fill in the + template's sections with the categorized data. +3. If not found, use the built-in style templates as defined in Phase 4. + +--- + +## Phase 3: Categorize and Translate + +Categorize all changes into player-facing categories: + +- **New Content**: new features, maps, characters, items, modes +- **Gameplay Changes**: balance adjustments, mechanic changes, progression changes +- **Quality of Life**: UI improvements, convenience features, accessibility +- **Bug Fixes**: grouped by system (combat, UI, networking, etc.) 
+- **Performance**: optimization improvements players might notice +- **Known Issues**: transparency about unresolved problems + +Translate developer language to player language: + +- "Refactored damage calculation pipeline" → "Improved hit detection accuracy" +- "Fixed null reference in inventory manager" → "Fixed a crash when opening inventory" +- "Reduced GC allocations in combat loop" → "Improved combat performance" +- Remove purely internal changes that don't affect players +- Preserve specific numbers for balance changes (damage: 50 → 45) + +--- + +## Phase 4: Generate Patch Notes + +### Brief Style +```markdown +# Patch [Version] — [Title] + +**New** +- [Feature 1] +- [Feature 2] + +**Changes** +- [Balance/mechanic change with before → after values] + +**Fixes** +- [Bug fix 1] +- [Bug fix 2] + +**Known Issues** +- [Issue 1] +``` + +### Detailed Style +```markdown +# Patch [Version] — [Title] +*[Date]* + +## Highlights +[1-2 sentence summary of the most exciting changes] + +## New Content +### [Feature Name] +[2-3 sentences describing the feature and why players should be excited] + +## Gameplay Changes +### Balance +| Change | Before | After | Reason | +| ---- | ---- | ---- | ---- | +| [Item/ability] | [old value] | [new value] | [brief rationale] | + +### Mechanics +- **[Change]**: [explanation of what changed and why] + +## Quality of Life +- [Improvement with context] + +## Bug Fixes +### Combat +- Fixed [description of what players experienced] + +### UI +- Fixed [description] + +### Networking +- Fixed [description] + +## Performance +- [Improvement players will notice] + +## Known Issues +- [Issue and workaround if available] +``` + +### Full Style +Includes everything from Detailed, plus: +```markdown +## Developer Commentary +### [Topic] +> [Developer insight into a major change — why it was made, what was considered, +> what the team learned. Written in first-person team voice.] 
+``` + +--- + +## Phase 5: Review Output + +Check the generated notes for: + +- No internal jargon (replace technical terms with player-friendly language) +- No references to internal systems, tickets, or sprint numbers +- Balance changes include before/after values +- Bug fixes describe the player experience, not the technical cause +- Tone matches the game's voice (adjust formality based on game style) + +--- + +## Phase 6: Save Patch Notes + +Present the completed patch notes to the user along with: a count of changes by category, and any internal changes that were excluded (for review). + +Ask: "May I write these patch notes to `docs/patch-notes/[version].md`?" + +If yes, write the file to `docs/patch-notes/[version].md`, creating the directory +if needed. Also write to `production/releases/[version]/patch-notes.md` as the +internal archive copy. + +--- + +## Phase 7: Next Steps + +Verdict: **COMPLETE** — patch notes generated and saved. + +- Run `/release-checklist` to verify all other release gates are met before publishing. +- Share the patch notes draft with the community-manager for tone review before posting publicly. diff --git a/.omc/skills/perf-profile/SKILL.md b/.omc/skills/perf-profile/SKILL.md new file mode 100644 index 0000000..813ca25 --- /dev/null +++ b/.omc/skills/perf-profile/SKILL.md @@ -0,0 +1,125 @@ +--- +name: perf-profile +description: "Structured performance profiling workflow. Identifies bottlenecks, measures against budgets, and generates optimization recommendations with priority rankings." 
+argument-hint: "[system-name or 'full']" +user-invocable: true +agent: performance-analyst +allowed-tools: Read, Glob, Grep, Bash +--- + +## Phase 1: Determine Scope + +Read the argument: + +- System name → focus profiling on that specific system +- `full` → run a comprehensive profile across all systems + +--- + +## Phase 2: Load Performance Budgets + +Check for existing performance targets in design docs or CLAUDE.md: + +- Target FPS (e.g., 60fps = 16.67ms frame budget) +- Memory budget (total and per-system) +- Load time targets +- Draw call budgets +- Network bandwidth limits (if multiplayer) + +--- + +## Phase 3: Analyze Codebase + +**CPU Profiling Targets:** +- `_process()` / `Update()` / `Tick()` functions — list all and estimate cost +- Nested loops over large collections +- String operations in hot paths +- Allocation patterns in per-frame code +- Unoptimized search/sort over game entities +- Expensive physics queries (raycasts, overlaps) every frame + +**Memory Profiling Targets:** +- Large data structures and their growth patterns +- Texture/asset memory footprint estimates +- Object pool vs instantiate/destroy patterns +- Leaked references (objects that should be freed but aren't) +- Cache sizes and eviction policies + +**Rendering Targets (if applicable):** +- Draw call estimates +- Overdraw from overlapping transparent objects +- Shader complexity +- Unoptimized particle systems +- Missing LODs or occlusion culling + +**I/O Targets:** +- Save/load performance +- Asset loading patterns (sync vs async) +- Network message frequency and size + +--- + +## Phase 4: Generate Profiling Report + +```markdown +## Performance Profile: [System or Full] +Generated: [Date] + +### Performance Budgets +| Metric | Budget | Estimated Current | Status | +|--------|--------|-------------------|--------| +| Frame time | [16.67ms] | [estimate] | [OK/WARNING/OVER] | +| Memory | [target] | [estimate] | [OK/WARNING/OVER] | +| Load time | [target] | [estimate] | 
[OK/WARNING/OVER] | +| Draw calls | [target] | [estimate] | [OK/WARNING/OVER] | + +### Hotspots Identified +| # | Location | Issue | Estimated Impact | Fix Effort | +|---|----------|-------|------------------|------------| + +### Optimization Recommendations (Priority Order) +1. **[Title]** — [Description] + - Location: [file:line] + - Expected gain: [estimate] + - Risk: [Low/Med/High] + - Approach: [How to implement] + +### Quick Wins (< 1 hour each) +- [Simple optimization 1] + +### Requires Investigation +- [Area that needs actual runtime profiling to confirm impact] +``` + +Output the report with a summary: top 3 hotspots, estimated headroom vs budget, and recommended next action. + +--- + +## Phase 5: Scope and Timeline Decision + +Activate this phase only if any hotspot has Fix Effort rated M (medium) or L (large). + +Present significant-effort items and ask the user to choose for each: + +- **A) Implement the optimization** (proceed with fix now or schedule it) +- **B) Reduce feature scope** (run `/scope-check [feature]` to analyze trade-offs) +- **C) Accept the performance hit and defer to Polish phase** (log as known issue) +- **D) Escalate to technical-director for an architectural decision** (run `/architecture-decision`) + +If multiple items are deferred to Polish (choice C), record them under `### Deferred to Polish`. + +This skill is read-only — no files are written. Verdict: **COMPLETE** — performance profile generated. + +--- + +## Phase 6: Next Steps + +- If bottlenecks require architectural change: run `/architecture-decision`. +- If scope reduction is needed: run `/scope-check [feature]`. +- To schedule optimizations: run `/sprint-plan update`.
+ +### Rules +- Never optimize without measuring first — gut feelings about performance are unreliable +- Recommendations must include estimated impact — "make it faster" is not actionable +- Profile on target hardware, not just development machines +- Static analysis (this skill) identifies candidates; runtime profiling confirms diff --git a/.omc/skills/playtest-report/SKILL.md b/.omc/skills/playtest-report/SKILL.md new file mode 100644 index 0000000..33f981a --- /dev/null +++ b/.omc/skills/playtest-report/SKILL.md @@ -0,0 +1,146 @@ +--- +name: playtest-report +description: "Generates a structured playtest report template or analyzes existing playtest notes into a structured format. Use this to standardize playtest feedback collection and analysis." +argument-hint: "[new|analyze path-to-notes] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task, AskUserQuestion +--- + +## Phase 1: Parse Arguments + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. 
+ +Determine the mode: + +- `new` → generate a blank playtest report template +- `analyze [path]` → read raw notes and fill in the template with structured findings + +--- + +## Phase 2A: New Template Mode + +Generate this template and output it to the user: + +```markdown +# Playtest Report + +## Session Info +- **Date**: [Date] +- **Build**: [Version/Commit] +- **Duration**: [Time played] +- **Tester**: [Name/ID] +- **Platform**: [PC/Console/Mobile] +- **Input Method**: [KB+M / Gamepad / Touch] +- **Session Type**: [First time / Returning / Targeted test] + +## Test Focus +[What specific features or flows were being tested] + +## First Impressions (First 5 minutes) +- **Understood the goal?** [Yes/No/Partially] +- **Understood the controls?** [Yes/No/Partially] +- **Emotional response**: [Engaged/Confused/Bored/Frustrated/Excited] +- **Notes**: [Observations] + +## Gameplay Flow +### What worked well +- [Observation 1] + +### Pain points +- [Issue 1 -- Severity: High/Medium/Low] + +### Confusion points +- [Where the player was confused and why] + +### Moments of delight +- [What surprised or pleased the player] + +## Bugs Encountered +| # | Description | Severity | Reproducible | +|---|-------------|----------|-------------| + +## Feature-Specific Feedback +### [Feature 1] +- **Understood purpose?** [Yes/No] +- **Found engaging?** [Yes/No] +- **Suggestions**: [Tester suggestions] + +## Quantitative Data (if available) +- **Deaths**: [Count and locations] +- **Time per area**: [Breakdown] +- **Items used**: [What and when] +- **Features discovered vs missed**: [List] + +## Overall Assessment +- **Would play again?** [Yes/No/Maybe] +- **Difficulty**: [Too Easy / Just Right / Too Hard] +- **Pacing**: [Too Slow / Good / Too Fast] +- **Session length preference**: [Shorter / Good / Longer] + +## Top 3 Priorities from this session +1. [Most important finding] +2. [Second priority] +3. 
[Third priority] +``` + +--- + +## Phase 2B: Analyze Mode + +Read the raw notes at the provided path. Cross-reference with existing design documents. Fill in the template above with structured findings. Flag any playtest observations that conflict with design intent. + +--- + +## Phase 3: Action Routing + +Categorize all findings into four buckets: + +- **Design changes needed** — fun issues, player confusion, broken mechanics, observations that conflict with the GDD's intended experience +- **Balance adjustments** — numbers feel wrong, difficulty too spiked or too flat +- **Bug reports** — clear implementation defects that are reproducible +- **Polish items** — not blocking progress, but friction or feel issues for later + +Present the categorized list, then route: + +- **Design changes:** "Run `/propagate-design-change [path]` on the affected design document to find downstream impacts before making changes." +- **Balance adjustments:** "Run `/balance-check [system]` to verify the full balance picture before tuning values." +- **Bugs:** "Use `/bug-report` to formally track these." +- **Polish items:** "Add to the polish backlog in `production/` when the team reaches that phase." + +--- + +## Phase 3b: Creative Director Player Experience Review + +**Review mode check** — apply before spawning CD-PLAYTEST: +- `solo` → skip. Note: "CD-PLAYTEST skipped — Solo mode." Proceed to Phase 4 (save the report). +- `lean` → skip (not a PHASE-GATE). Note: "CD-PLAYTEST skipped — Lean mode." Proceed to Phase 4 (save the report). +- `full` → spawn as normal. + +After categorising findings, spawn `creative-director` via Task using gate **CD-PLAYTEST** (`.claude/docs/director-gates.md`). + +Pass: the structured report content, game pillars and core fantasy (from `design/gdd/game-concept.md`), the specific hypothesis being tested. + +Present the creative director's assessment before saving the report. 
If CONCERNS or REJECT, add a `## Creative Director Assessment` section to the report capturing the verdict and feedback. If APPROVE, note the approval in the report. + +--- + +## Phase 4: Save Report + +Ask: "May I write this playtest report to `production/qa/playtests/playtest-[date]-[tester].md`?" + +If yes, write the file, creating the directory if needed. + +--- + +## Phase 5: Next Steps + +Verdict: **COMPLETE** — playtest report generated. + +- Act on the highest-priority finding category first. +- After addressing design changes: re-run `/design-review` on the updated GDD. +- After fixing bugs: re-run `/bug-triage` to update priorities. diff --git a/.omc/skills/project-stage-detect/SKILL.md b/.omc/skills/project-stage-detect/SKILL.md new file mode 100644 index 0000000..148abaa --- /dev/null +++ b/.omc/skills/project-stage-detect/SKILL.md @@ -0,0 +1,195 @@ +--- +name: project-stage-detect +description: "Automatically analyze project state, detect stage, identify gaps, and recommend next steps based on existing artifacts. Use when user asks 'where are we in development', 'what stage are we in', 'full project audit'." +argument-hint: "[optional: role filter like 'programmer' or 'designer']" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write +model: haiku +# Read-only diagnostic skill — no specialist agent delegation needed +--- + +# Project Stage Detection + +This skill scans your project to determine its current development stage, completeness +of artifacts, and gaps that need attention. It's especially useful when: +- Starting with an existing project +- Onboarding to a codebase +- Checking what's missing before a milestone +- Understanding "where are we?" + +--- + +## Workflow + +### 1. 
Scan Key Directories + +Analyze project structure and content: + +**Design Documentation** (`design/`): +- Count GDD files in `design/gdd/*.md` +- Check for game-concept.md, game-pillars.md, systems-index.md +- If systems-index.md exists, count total systems vs. designed systems +- Analyze completeness (Overview, Detailed Design, Edge Cases, etc.) +- Count narrative docs in `design/narrative/` +- Count level designs in `design/levels/` + +**Source Code** (`src/`): +- Count source files (language-agnostic) +- Identify major systems (directories with 5+ files) +- Check for core/, gameplay/, ai/, networking/, ui/ directories +- Estimate lines of code (rough scale) + +**Production Artifacts** (`production/`): +- Check for active sprint plans +- Look for milestone definitions +- Find roadmap documents + +**Prototypes** (`prototypes/`): +- Count prototype directories +- Check for READMEs (documented vs undocumented) +- Assess if prototypes are archived or active + +**Architecture Docs** (`docs/architecture/`): +- Count ADRs (Architecture Decision Records) +- Check for overview/index documents + +**Tests** (`tests/`): +- Count test files +- Estimate test coverage (rough heuristic) + +### 2. Classify Project Stage + +Based on scanned artifacts, determine stage. Check `production/stage.txt` first — +if it exists, use its value (explicit override from `/gate-check`). 
Otherwise, +auto-detect using these heuristics (check from most-advanced backward): + +| Stage | Indicators | +|-------|-----------| +| **Concept** | No game concept doc, brainstorming phase | +| **Systems Design** | Game concept exists, systems index missing or incomplete | +| **Technical Setup** | Systems index exists, engine not configured | +| **Pre-Production** | Engine configured, `src/` has <10 source files | +| **Production** | `src/` has 10+ source files, active development | +| **Polish** | Explicit only (set by `/gate-check` Production → Polish gate) | +| **Release** | Explicit only (set by `/gate-check` Polish → Release gate) | + +### 3. Collaborative Gap Identification + +**DO NOT** just list missing files. Instead, **ask clarifying questions**: + +- "I see combat code (`src/gameplay/combat/`) but no `design/gdd/combat-system.md`. Was this prototyped first, or should we reverse-document?" +- "You have 15 ADRs but no architecture overview. Should I create one to help new contributors?" +- "No sprint plans in `production/`. Are you tracking work elsewhere (Jira, Trello, etc.)?" +- "I found a game concept but no systems index. Have you decomposed the concept into individual systems yet, or should we run `/map-systems`?" +- "Prototypes directory has 3 projects with no READMEs. Were these experiments, or do they need documentation?" + +### 4. Generate Stage Report + +Use template: `.claude/docs/templates/project-stage-report.md` + +**Report structure**: +```markdown +# Project Stage Analysis + +**Date**: [date] +**Stage**: [Concept/Systems Design/Technical Setup/Pre-Production/Production/Polish/Release] +**Stage Confidence**: [PASS — clearly detected / CONCERNS — ambiguous signals / FAIL — critical gaps block progress] + +## Completeness Overview +- Design: [X%] ([N] docs, [gaps]) +- Code: [X%] ([N] files, [systems]) +- Architecture: [X%] ([N] ADRs, [gaps]) +- Production: [X%] ([status]) +- Tests: [X%] ([coverage estimate]) + +## Gaps Identified +1. 
[Gap description + clarifying question] +2. [Gap description + clarifying question] + +## Recommended Next Steps +[Priority-ordered list based on stage and role] +``` + +### 5. Role-Filtered Recommendations (Optional) + +If user provided a role argument (e.g., `/project-stage-detect programmer`): + +**Programmer**: +- Focus on architecture docs, test coverage, missing ADRs +- Code-to-docs gaps + +**Designer**: +- Focus on GDD completeness, missing design sections +- Prototype documentation + +**Producer**: +- Focus on sprint plans, milestone tracking, roadmap +- Cross-team coordination docs + +**General** (no role): +- Holistic view of all gaps +- Highest-priority items across domains + +### 6. Request Approval Before Writing + +**Collaborative protocol**: +``` +I've analyzed your project. Here's what I found: + +[Show summary] + +Gaps identified: +1. [Gap 1 + question] +2. [Gap 2 + question] + +Recommended next steps: +- [Priority 1] +- [Priority 2] +- [Priority 3] + +May I write the full stage analysis to production/project-stage-report.md? +``` + +Wait for user approval before creating the file. + +--- + +## Example Usage + +```bash +# General project analysis +/project-stage-detect + +# Programmer-focused analysis +/project-stage-detect programmer + +# Designer-focused analysis +/project-stage-detect designer +``` + +--- + +## Follow-Up Actions + +After generating the report, suggest relevant next steps: + +- **Concept exists but no systems index?** → `/map-systems` to decompose into systems +- **Missing design docs?** → `/reverse-document design src/[system]` +- **Missing architecture docs?** → `/architecture-decision` or `/reverse-document architecture` +- **Prototypes need documentation?** → `/reverse-document concept prototypes/[name]` +- **No sprint plan?** → `/sprint-plan` +- **Approaching milestone?** → `/milestone-review` + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle: + +1. 
**Question First**: Ask about gaps, don't assume +2. **Present Options**: "Should I create X, or is it tracked elsewhere?" +3. **User Decides**: Wait for direction +4. **Show Draft**: Display report summary +5. **Get Approval**: "May I write to production/project-stage-report.md?" + +**Never** silently write files. **Always** show findings and ask before creating artifacts. diff --git a/.omc/skills/propagate-design-change/SKILL.md b/.omc/skills/propagate-design-change/SKILL.md new file mode 100644 index 0000000..801e931 --- /dev/null +++ b/.omc/skills/propagate-design-change/SKILL.md @@ -0,0 +1,238 @@ +--- +name: propagate-design-change +description: "When a GDD is revised, scans all ADRs and the traceability index to identify which architectural decisions are now potentially stale. Produces a change impact report and guides the user through resolution." +argument-hint: "[path/to/changed-gdd.md]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash, Task +agent: technical-director +--- + +# Propagate Design Change + +When a GDD changes, architectural decisions written against it may no longer be +valid. This skill finds every affected ADR, compares what the ADR assumed against +what the GDD now says, and guides the user through resolution. + +**Usage:** `/propagate-design-change design/gdd/combat-system.md` + +--- + +## 1. Validate Argument + +A GDD path argument is **required**. If missing, fail with: +> "Usage: `/propagate-design-change design/gdd/[system].md` +> Provide the path to the GDD that was changed." + +Verify the file exists. If not, fail with: +> "[path] not found. Check the path and try again." + +--- + +## 2. Read the Changed GDD + +Read the current GDD in full. + +--- + +## 3. 
Read the Previous Version + +Run git to get the previous committed version: + +```bash +git show HEAD:design/gdd/[filename].md +``` + +If the file has no git history (new file), report: +> "No previous version in git — this appears to be a new GDD, not a revision. +> Nothing to propagate." + +If git returns the previous version, do a conceptual diff: +- Identify sections that changed (new rules, removed rules, modified formulas, + changed acceptance criteria, changed tuning knobs) +- Identify sections that are unchanged +- Produce a change summary: + +``` +## Change Summary: [GDD filename] +Date of revision: [today] + +Changed sections: +- [Section name]: [what changed — new rule, removed rule, formula modified, etc.] + +Unchanged sections: +- [Section name] + +Key changes affecting architecture: +- [Change 1 — likely to affect ADRs] +- [Change 2] +``` + +--- + +## 4. Load Architecture Inputs + +Read all ADRs in `docs/architecture/`: +- For each ADR, read the full file +- Extract the "GDD Requirements Addressed" table +- Note which GDD documents and requirement IDs each ADR references + +Read `docs/architecture/architecture-traceability.md` if it exists. + +Report: "Loaded [N] ADRs. [M] reference [gdd filename]." + +--- + +## 5. Impact Analysis + +For each ADR that references the changed GDD: + +Compare the ADR's "GDD Requirements Addressed" entries against the changed sections +of the GDD. For each referenced requirement: + +1. **Locate the requirement** in the current GDD — does it still exist? +2. **Compare**: What did the GDD say when the ADR was written vs. what it says now? +3. **Assess the ADR decision**: Is the architectural decision still valid? 
+ +Classify each affected ADR as one of: + +| Status | Meaning | +|--------|---------| +| ✅ **Still Valid** | The GDD change doesn't affect what this ADR decided | +| ⚠️ **Needs Review** | The GDD change may affect this ADR — human judgment needed | +| 🔴 **Likely Superseded** | The GDD change directly contradicts what this ADR assumed | + +For each affected ADR, produce an impact entry: + +``` +### ADR-NNNN: [title] +Status: [Still Valid / Needs Review / Likely Superseded] + +What the ADR assumed about this GDD: + "[relevant quote from the ADR's GDD Requirements Addressed section]" + +What the GDD now says: + "[relevant quote from the current GDD]" + +Assessment: + [Explanation of whether the ADR decision is still valid, and why] + +Recommended action: + [Keep as-is | Review and update | Mark Superseded and write new ADR] +``` + +--- + +## 6. Present Impact Report + +Present the full impact report to the user before asking for any action. Format: + +``` +## Design Change Impact Report +GDD: [filename] +Date: [today] +Changes detected: [N sections changed] +ADRs referencing this GDD: [M] + +### Not Affected +[ADRs referencing this GDD whose decisions remain valid] + +### Needs Review ([count]) +[ADRs that may need updating] + +### Likely Superseded ([count]) +[ADRs whose assumptions are now contradicted] +``` + +--- + +## 6b. Director Gate — Technical Impact Review + +**Review mode check** — apply before spawning TD-CHANGE-IMPACT: +- `solo` → skip. Note: "TD-CHANGE-IMPACT skipped — Solo mode." Proceed to Phase 7. +- `lean` → skip. Note: "TD-CHANGE-IMPACT skipped — Lean mode." Proceed to Phase 7. +- `full` → spawn as normal. + +Spawn `technical-director` via Task using gate **TD-CHANGE-IMPACT** (`.claude/docs/director-gates.md`). + +Pass: the full Design Change Impact Report from Phase 6 (change summary, all affected ADRs with their Still Valid / Needs Review / Likely Superseded classifications, and recommended actions). 
+ +The technical-director reviews whether: +- The impact classifications are correct (no ADRs under-classified) +- The recommended actions are architecturally sound +- Any cascading effects on other ADRs or systems were missed + +Apply the verdict: +- **APPROVE** → proceed to Phase 7 resolution workflow +- **CONCERNS** → surface the specific ADRs or recommendations flagged; use `AskUserQuestion` with options: `Revise the impact assessment` / `Accept with noted concerns` / `Discuss further` +- **REJECT** → do not proceed to resolution; re-analyze the impact before continuing + +--- + +## 7. Resolution Workflow + +For each ADR marked "Needs Review" or "Likely Superseded", ask the user what to do: + +Ask for each ADR in turn: +> "ADR-NNNN ([title]) — [status]. What would you like to do?" +> Options: +> - "Mark Superseded (I'll write a new ADR)" — updates ADR status line to `Superseded by: [pending]` +> - "Update in place (minor revision)" — opens the ADR for editing; note what to revise +> - "Keep as-is (the change doesn't actually affect this decision)" +> - "Skip for now (revisit later)" + +For ADRs marked **Superseded**: +- Update the ADR's Status field: `Superseded by ADR-[next number] (pending — see change-impact-[date]-[system].md)` +- Ask: "May I update the status in [ADR filename]?" + +--- + +## 8. Update Traceability Index + +If `docs/architecture/architecture-traceability.md` exists: +- Add the changed GDD requirements to the "Superseded Requirements" table: + +```markdown +## Superseded Requirements +| Date | GDD | Requirement | Changed To | ADRs Affected | Resolution | +|------|-----|-------------|------------|---------------|------------| +| [date] | [gdd] | [old requirement text] | [new requirement text] | ADR-NNNN | [Superseded/Updated/Valid] | +``` + +Ask: "May I update the traceability index?" + +--- + +## 9. Output Change Impact Document + +Ask: "May I write the change impact report to `docs/architecture/change-impact-[date]-[system-slug].md`?" 
+ +The document contains: +- The change summary from step 3 +- The full impact analysis from step 5 +- Resolution decisions made in step 7 +- List of ADRs that need to be written or updated + +If user approved: Verdict: **COMPLETE** — change impact report saved. +If user declined: Verdict: **BLOCKED** — user declined write. + +--- + +## 10. Follow-Up Actions + +Based on the resolution decisions, suggest: + +- **ADRs marked Superseded**: "Run `/architecture-decision [title]` to write the + replacement ADR. Then re-run `/propagate-design-change` to verify coverage." +- **ADRs to update in place**: List the specific fields to update in each ADR +- **If many ADRs affected**: "Run `/architecture-review` after all ADRs are updated + to verify the full traceability matrix is still coherent." + +--- + +## Collaborative Protocol + +1. **Read silently** — compute the full impact before presenting anything +2. **Show the full report first** — let the user see the scope before asking for action +3. **Ask per-ADR** — don't batch decisions; each affected ADR may need different treatment +4. **Ask before writing** — always confirm before modifying any file +5. **Non-destructive** — never delete ADR content; only add "Superseded by" notes diff --git a/.omc/skills/prototype/SKILL.md b/.omc/skills/prototype/SKILL.md new file mode 100644 index 0000000..12f8c8e --- /dev/null +++ b/.omc/skills/prototype/SKILL.md @@ -0,0 +1,157 @@ +--- +name: prototype +description: "Rapid prototyping workflow. Skips normal standards to quickly validate a game concept or mechanic. Produces throwaway code and a structured prototype report." +argument-hint: "[concept-description] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task +agent: prototyper +isolation: worktree +--- + +## Phase 1: Define the Question + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. 
Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +Read the concept description from the argument. Identify the core question this prototype must answer. If the concept is vague, state the question explicitly before proceeding — a prototype without a clear question wastes time. + +--- + +## Phase 2: Load Project Context + +Read `CLAUDE.md` for project context and the current tech stack. Understand what engine, language, and frameworks are in use so the prototype is built with compatible tooling. + +--- + +## Phase 3: Plan the Prototype + +Define in 3-5 bullet points what the minimum viable prototype looks like: + +- What is the core question? +- What is the absolute minimum code needed to answer it? +- What can be skipped (error handling, polish, architecture)? + +Present this plan to the user before building. Ask for confirmation if scope seems unclear. + +--- + +## Phase 4: Implement + +Ask: "May I create the prototype directory at `prototypes/[concept-name]/` and begin implementation?" + +If yes, create the directory. Every file must begin with: + +``` +// PROTOTYPE - NOT FOR PRODUCTION +// Question: [Core question being tested] +// Date: [Current date] +``` + +Standards are intentionally relaxed: + +- Hardcode values freely +- Use placeholder assets +- Skip error handling +- Use the simplest approach that works +- Copy code rather than importing from production + +Run the prototype. Observe behavior. Collect any measurable data (frame times, interaction counts, feel assessments). 
+ +--- + +## Phase 5: Generate Prototype Report + +Draft the report: + +```markdown +## Prototype Report: [Concept Name] + +### Hypothesis +[What we expected to be true -- the question we set out to answer] + +### Approach +[What we built, how long it took, what shortcuts we took] + +### Result +[What actually happened -- specific observations, not opinions] + +### Metrics +[Any measurable data collected during testing] +- Frame time: [if relevant] +- Feel assessment: [subjective but specific -- "response felt sluggish at + 200ms delay" not "felt bad"] +- Player action counts: [if relevant] +- Iteration count: [how many attempts to get it working] + +### Recommendation: [PROCEED / PIVOT / KILL] + +[One paragraph explaining the recommendation with evidence] + +### If Proceeding +[What needs to change for a production-quality implementation] +- Architecture requirements +- Performance targets +- Scope adjustments from the original design +- Estimated production effort + +### If Pivoting +[What alternative direction the results suggest] + +### If Killing +[Why this concept does not work and what we should do instead] + +### Lessons Learned +[Discoveries that affect other systems or future work] +``` + +Ask: "May I write this report to `prototypes/[concept-name]/REPORT.md`?" + +If yes, write the file. + +--- + +## Phase 6: Creative Director Review + +**Review mode check** — apply before spawning CD-PLAYTEST: +- `solo` → skip. Note: "CD-PLAYTEST skipped — Solo mode." Proceed to Phase 7 summary with the prototyper's recommendation as the final verdict. +- `lean` → skip (not a PHASE-GATE). Note: "CD-PLAYTEST skipped — Lean mode." Proceed to Phase 7 summary with the prototyper's recommendation as the final verdict. +- `full` → spawn as normal. + +Spawn `creative-director` via Task using gate **CD-PLAYTEST** (`.claude/docs/director-gates.md`). 
+ +Pass: the full REPORT.md content, the original design question, game pillars and core fantasy from `design/gdd/game-concept.md` (if it exists). + +The creative director evaluates the prototype result against the game's creative vision and pillars, then confirms, modifies, or overrides the prototyper's PROCEED / PIVOT / KILL recommendation. Their verdict is final. Update the REPORT.md `Recommendation` section if the creative director's verdict differs from the prototyper's. + +--- + +## Phase 7: Summary and Next Steps + +Output a summary to the user: the core question, the result, the prototyper's initial recommendation, and the creative director's final decision. Link to the full report at `prototypes/[concept-name]/REPORT.md`. + +If **PROCEED**: run `/design-system` to begin the production GDD for this mechanic, or `/architecture-decision` to record key technical decisions before implementation. + +If **PIVOT** or **KILL**: no further action needed — the prototype report is the deliverable. + +Verdict: **COMPLETE** — prototype finished. Recommendation is PROCEED, PIVOT, or KILL based on findings above. 
+ +### Important Constraints + +- Prototype code must NEVER import from production source files +- Production code must NEVER import from prototype directories +- If the recommendation is PROCEED, the production implementation must be written from scratch — prototype code is not refactored into production +- Total prototype effort should be timeboxed to 1-3 days equivalent of work +- If the prototype scope starts growing, stop and reassess whether the question can be simplified + +--- + +## Recommended Next Steps + +- **If PROCEED**: Run `/design-system [mechanic]` to author the production GDD, or `/architecture-decision` to record key technical decisions before implementation +- **If PIVOT**: Run `/prototype [revised-concept]` to test the adjusted direction +- **If KILL**: No further action required — the prototype report is the deliverable +- Run `/playtest-report` to formally document any playtest sessions conducted during prototyping diff --git a/.omc/skills/qa-plan/SKILL.md b/.omc/skills/qa-plan/SKILL.md new file mode 100644 index 0000000..054edf3 --- /dev/null +++ b/.omc/skills/qa-plan/SKILL.md @@ -0,0 +1,259 @@ +--- +name: qa-plan +description: "Generate a QA test plan for a sprint or feature. Reads GDDs and story files, classifies stories by test type (Logic/Integration/Visual/UI), and produces a structured test plan covering automated tests required, manual test cases, smoke test scope, and playtest sign-off requirements. Run before sprint begins or when starting a major feature." +argument-hint: "[sprint | feature: system-name | story: path]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, AskUserQuestion +agent: qa-lead +--- + +# QA Plan + +This skill generates a structured QA plan for a sprint, feature, or individual +story. 
It reads all in-scope story files and their referenced GDDs, classifies +each story by test type, and produces a plan that tells developers exactly what +to automate, what to verify manually, what the smoke test scope is, and when +to bring in a playtester. + +Run this before a sprint begins so the team knows upfront what testing work +is required. A test plan written after implementation is a post-mortem, not a +plan. + +**Output:** `production/qa/qa-plan-[sprint-slug]-[date].md` + +--- + +## Phase 1: Parse Scope + +**Argument:** `$ARGUMENTS` (blank = ask user via AskUserQuestion) + +Determine scope from the argument: + +- **`sprint`** — read the most recent file in `production/sprints/`, extract + every story file path referenced. If `production/sprint-status.yaml` exists, + use it as the primary story list and fall back to the sprint plan for story + metadata. +- **`feature: [system-name]`** — glob `production/epics/*/story-*.md`, filter + to stories whose file path or title contains the system name. Also check the + epic index file (`EPIC.md`) in that system's directory. +- **`story: [path]`** — validate that the path exists and load that single file. +- **No argument** — use `AskUserQuestion`: + - "What is the scope for this QA plan?" + - Options: "Current sprint", "Specific feature (enter system name)", + "Specific story (enter path)", "Full epic" + +After resolving scope, report: "Building QA plan for [N] stories in [scope]." + +If a story file path is referenced but the file does not exist, note it as +MISSING and continue with the remaining stories. Do not fail the entire plan +for one missing file. 
+ +--- + +## Phase 2: Load Inputs + +For each in-scope story file, read the full file and extract: + +- **Story title** and story ID (from filename or header) +- **Story Type** field (if present in the file header — e.g., `Type: Logic`) +- **Acceptance criteria** — the complete numbered/bulleted list +- **Implementation files** — listed under "Files to Create / Modify" or similar +- **Engine notes** — any engine API warnings or version-specific notes +- **GDD reference** — the GDD path(s) cited +- **ADR reference** — the ADR(s) cited +- **Estimate** — hours or story points if present +- **Dependencies** — other stories this one depends on + +After reading stories, load supporting context once (not per story): + +- `design/gdd/systems-index.md` — to understand system priorities and which + GDDs are approved +- For each unique GDD referenced across all stories: read only the + **Acceptance Criteria** and **Formulas** sections. Do not load full GDD text — + these two sections contain the testable requirements and the math to verify. +- `docs/architecture/control-manifest.md` — scan for forbidden patterns that + automated tests should guard against (if the file exists) + +If no GDD is referenced in a story, note it as a gap but do not block the plan. +The story will be classified using acceptance criteria alone. + +--- + +## Phase 3: Classify Each Story + +For each story, assign a Story Type. If the story already has a `Type:` field +in its header, use that value and validate it against the criteria below. If the +field is missing or ambiguous, infer the type from the acceptance criteria. 
+ +| Story Type | Classification Indicators | +|---|---| +| **Logic** | Acceptance criteria reference calculations, formulas, numerical thresholds, state transitions, AI decisions, data validation, buff/debuff stacking, economy transactions, or any testable computation | +| **Integration** | Criteria involve two or more systems interacting, signals or events propagating across system boundaries, save/load round-trips, network sync, or persistence | +| **Visual/Feel** | Criteria reference animation behavior, VFX, shader output, "feels responsive", perceived timing, screen shake, particle effects, audio sync, or visual feedback quality | +| **UI** | Criteria reference menus, HUD elements, buttons, screens, dialogue boxes, inventory panels, tooltips, or any player-facing interface element | +| **Config/Data** | Changes are limited to balance tuning values, data files, or configuration — no new code logic is involved | + +**Mixed stories** (e.g., a story that adds both a formula and a UI display): +assign the primary type based on which acceptance criteria carry the highest +implementation risk, and note the secondary type. Mixed Logic+Integration or +Visual+UI combinations are the most common. + +After classifying all stories, produce a classification summary table in +conversation before proceeding to Phase 4. This gives the user visibility into +how tests will be allocated. + +--- + +## Phase 4: Generate Test Plan + +Assemble the full QA plan document. 
Use this structure: + +````markdown +# QA Plan: [Sprint/Feature Name] +**Date**: [date] +**Generated by**: /qa-plan +**Scope**: [N stories across [N systems]] +**Engine**: [engine name from .claude/docs/technical-preferences.md, or "Not configured"] +**Sprint File**: [path to sprint plan if applicable] + +--- + +## Test Summary + +| Story | Type | Automated Test Required | Manual Verification Required | +|-------|------|------------------------|------------------------------| +| [story title] | Logic | Unit test — `tests/unit/[system]/` | None | +| [story title] | Integration | Integration test — `tests/integration/[system]/` | Smoke check | +| [story title] | Visual/Feel | None (not automatable) | Screenshot + lead sign-off | +| [story title] | UI | Interaction walkthrough | Manual step-through | +| [story title] | Config/Data | Data validation test | Spot-check in-game values | + +--- + +## Automated Tests Required + +### [Story Title] — [Type] +**Test file path**: `tests/[unit|integration]/[system]/[story-slug]_test.[ext]` +**What to test**: +- [Specific formula or rule from the GDD Formulas section] +- [Each named state transition or decision branch] +- [Each side effect that should or should not occur] + +**Edge cases to cover**: +- Zero/minimum input values (e.g., 0 damage, empty inventory) +- Maximum/boundary input values (e.g., max level, stat cap) +- Invalid or null input (e.g., missing target, dead entity) +- [Any edge case explicitly called out in the GDD Edge Cases section] + +**Estimated test count**: ~[N] unit tests + +[If no GDD formula reference was found for this story, note:] +*No formula found in referenced GDD — test cases must be derived from acceptance +criteria directly. 
Review the GDD Formulas section before writing tests.* + +--- + +## Manual QA Checklist + +### [Story Title] — [Type] +**Verification method**: [Screenshot + designer sign-off | Playtest session | +Manual step-through | Comparison against reference footage] +**Who must sign off**: [designer / lead-programmer / qa-lead / art-lead] +**Evidence to capture**: [screenshot of X | video clip of Y | written playtest +notes | side-by-side comparison] + +Checklist: +- [ ] [Specific observable condition — concrete and falsifiable] +- [ ] [Another condition] +- [ ] [Every acceptance criterion translated into a manual check item] + +*If any criterion uses subjective language ("feels", "looks", "seems"), it must +be supplemented with a specific benchmark or a playtest protocol note.* + +--- + +## Smoke Test Scope + +Critical paths to verify before any QA hand-off for this sprint: + +1. Game launches to main menu without crash +2. New game / new session can be started +3. [Primary mechanic introduced or changed this sprint] +4. [Any system with a regression risk from this sprint's changes] +5. Save / load cycle completes without data loss (if save system exists) +6. Performance is within budget on target hardware (no new frame spikes) + +*Smoke tests are verified by the developer via `/smoke-check`. Reference this +list when running that skill.* + +--- + +## Playtest Requirements + +| Story | Playtest Goal | Min Sessions | Target Player Type | +|-------|--------------|--------------|-------------------| +| [story] | [What question must the session answer?] | [N] | [new player / experienced] | + +**Sign-off requirement**: Playtest notes must be written to +`production/session-logs/playtest-[sprint]-[story-slug].md` and reviewed by +the [designer / qa-lead] before the story can be marked COMPLETE. 
+ +If no stories require playtest validation: *No playtest sessions required for +this sprint.* + +--- + +## Definition of Done — This Sprint + +A story is DONE when ALL of the following are true: + +- [ ] All acceptance criteria verified — via automated test result OR documented + manual evidence (screenshot, video, or playtest notes with sign-off) +- [ ] Test file exists at the specified path for all Logic and Integration stories +- [ ] Manual evidence document exists for all Visual/Feel and UI stories +- [ ] Smoke check passes (run `/smoke-check sprint` before QA hand-off) +- [ ] No regressions introduced +- [ ] Code reviewed (via `/code-review` or documented peer review) +- [ ] Story file updated to `Status: Complete` (via `/story-done`) +```` + +When generating content, use the actual story titles, GDD formula text, and +acceptance criteria extracted in Phase 2. Do not use placeholder text — every +test entry should reflect the real requirements of these specific stories. + +--- + +## Phase 5: Write Output + +Show the complete plan in conversation (or a summary if the plan is very long), +then ask: + +"May I write this QA plan to `production/qa/qa-plan-[sprint-slug]-[date].md`?" + +Write the plan exactly as generated — do not truncate. + +After writing: + +"QA plan written to `production/qa/qa-plan-[sprint-slug]-[date].md`. + +Next steps: +- Share this plan with the team before sprint implementation begins +- Run `/smoke-check sprint` after all stories are implemented to gate QA hand-off +- For Logic/Integration stories, create the test files at the listed paths + before marking stories done — `/story-done` checks for them" + +--- + +## Collaborative Protocol + +- **Never write the plan without asking** — Phase 5 requires explicit approval. +- **Classify conservatively**: when a story is ambiguous between Logic and + Integration, classify it as Integration — it requires both unit and + integration tests. 
+- **Do not invent test cases** beyond what acceptance criteria and GDD formulas + support. If a formula is absent from the GDD, flag it rather than guessing. +- **Playtest requirements are advisory**: the user decides whether a playtest + is warranted for borderline Visual/Feel stories. Flag the case; do not mandate. +- Use `AskUserQuestion` for scope selection when no argument is provided. + Keep all other phases non-interactive — present findings, then ask once to + approve the write. diff --git a/.omc/skills/quick-design/SKILL.md b/.omc/skills/quick-design/SKILL.md new file mode 100644 index 0000000..55d28eb --- /dev/null +++ b/.omc/skills/quick-design/SKILL.md @@ -0,0 +1,274 @@ +--- +name: quick-design +description: "Lightweight design spec for small changes — tuning adjustments, minor mechanics, balance tweaks. Skips full GDD authoring when a system GDD already exists or the change is too small to warrant one. Produces a Quick Design Spec that embeds directly into story files." +argument-hint: "[brief description of the change]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit +--- + +# Quick Design + +This is the **lightweight design path** for changes that don't need a full GDD. +Full GDD authoring via `/design-system` is the heavyweight path. Use this skill +for work under approximately 4 hours of implementation — tuning adjustments, +minor behavioral tweaks, small additions to existing systems, or standalone +features too small to warrant a full document. + +**Output:** `design/quick-specs/[name]-[date].md` + +**When to run:** Anytime a change is too small for `/design-system` but too +meaningful to implement without a written rationale. + +--- + +## 1. Classify the Change + +First, read the argument and determine which category this change falls into: + +- **Tuning** — changing numbers or balance values in an existing system with no + behavioral change (most minimal path). 
Example: "increase jump height from 5 + to 6 units", "reduce enemy patrol speed by 10%". +- **Tweak** — a small behavioral change to an existing system that introduces no + new states, branches, or systems. Example: "make dash invincible on frame 1", + "allow combo to cancel into roll". +- **Addition** — adding a small mechanic to an existing system that may introduce + 1-2 new states or interactions. Example: "add a parry window to the block + mechanic", "add a charge variant to the basic attack". +- **New Small System** — a standalone feature small enough that it has no + existing GDD and is under approximately one week of implementation work. + Example: "achievement popup system", "simple day/night visual cycle". + +If the change does NOT fit these categories — it introduces a new system with +significant cross-system dependencies, requires more than one week of +implementation, or fundamentally alters an existing system's core rules — stop +and redirect to `/design-system` instead. + +Present the classification to the user and confirm it is correct before +proceeding. If there is no argument, ask the user to describe the change. + +--- + +## 2. Context Scan + +Before drafting anything, read the relevant context: + +- Search `design/gdd/` for the GDD most relevant to this change. Read the + sections that this change would affect. +- Check whether `design/gdd/systems-index.md` exists. If it does, read it to + understand where this system sits in the dependency graph and what tier it + belongs to. If it does not exist, note "No systems index found — skipping + dependency tier check." and continue. +- Check `design/quick-specs/` for any prior quick specs that touched this + system — avoid contradicting them. +- If this is a Tuning change, also check `assets/data/` for the data file that + holds the relevant values. + +Report what was found: "Found GDD at [path]. Relevant section: [section name]. +No conflicting quick specs found." (or note any conflicts found.) 
+ +--- + +## 3. Draft the Quick Design Spec + +Use the appropriate spec format for the change category. + +### For Tuning changes + +Produce a single table: + +```markdown +# Quick Design Spec: [Title] + +**Type**: Tuning +**System**: [System name] +**GDD Reference**: `design/gdd/[filename].md` — Tuning Knobs section +**Date**: [today] + +## Change + +| Parameter | Old Value | New Value | Rationale | +|-----------|-----------|-----------|-----------| +| [param] | [old] | [new] | [why] | + +## Tuning Knob Mapping + +Maps to GDD Tuning Knob: [knob name and its documented range]. +New value is [within / at the edge of / outside] the documented range. +[If outside: explain why the range should be extended.] + +## Acceptance Criteria + +- [ ] [Parameter] reads [new value] from `assets/data/[file]` +- [ ] Behavior difference is observable in [specific context] +- [ ] No regression in [related behavior] +``` + +### For Tweak and Addition changes + +```markdown +# Quick Design Spec: [Title] + +**Type**: [Tweak / Addition] +**System**: [System name] +**GDD Reference**: `design/gdd/[filename].md` +**Date**: [today] + +## Change Summary + +[1-2 sentences describing what changes and why.] + +## Motivation + +[Why is this change needed? What player experience problem does it solve? +Reference the relevant MDA aesthetic or player feedback if applicable.] + +## Design Delta + +Current GDD says (quoting `design/gdd/[filename].md`, [section]): + +> [exact quote of the relevant rule or description] + +This spec changes that to: + +[New rule or description, written with the same precision as a GDD Detailed +Rules section. A programmer should be able to implement from this text alone.] + +## New Rules / Values + +[Full unambiguous statement of the replacement content. If this introduces +new states, list them. If it introduces new parameters, define their ranges.] 
+ +## Affected Systems + +| System | Impact | Action Required | +|--------|--------|-----------------| +| [system] | [how it is affected] | [update GDD / update data file / no action] | + +## Acceptance Criteria + +- [ ] [Specific, testable criterion 1] +- [ ] [Specific, testable criterion 2] +- [ ] [Specific, testable criterion 3] +- [ ] No regression: [the original behavior this must not break] + +## GDD Update Required? + +[Yes / No] +[If yes: which file, which section, and what the update should say.] +``` + +### For New Small System changes + +Use a trimmed GDD structure. Include only the sections that are directly +necessary — skip Player Fantasy, full Formulas, and Edge Cases unless the +system specifically requires them. + +```markdown +# Quick Design Spec: [Title] + +**Type**: New Small System +**Scope**: [1-2 sentence description of what this system does and doesn't do] +**Date**: [today] +**Estimated Implementation**: [hours] + +## Overview + +[One paragraph a new team member could understand. What does this system do, +when does it activate, and what does it produce?] + +## Core Rules + +[Unambiguous rules for the system. Use numbered lists for sequential behavior +and bullet lists for conditions. Be precise enough that a programmer can +implement without asking questions.] + +## Tuning Knobs + +| Knob | Default | Range | Category | Rationale | +|------|---------|-------|----------|-----------| +| [name] | [value] | [min–max] | [feel/curve/gate] | [why this default] | + +All values must live in `assets/data/[appropriate-file].json`, not hardcoded. + +## Acceptance Criteria + +- [ ] [Functional criterion: does the right thing] +- [ ] [Functional criterion: handles the edge case] +- [ ] [Experiential criterion: feels right — what a playtest validates] +- [ ] [Regression criterion: does not break adjacent system] + +## Systems Index + +This system is not currently in `design/gdd/systems-index.md`. 
+[If it should be added: suggest which layer and priority tier.] +[If it is too small to track: state "This system is below systems-index +tracking threshold — quick spec is sufficient."] +``` + +--- + +## 4. Approval and Filing + +Present the draft to the user in full. Then ask: + +"May I write this Quick Design Spec to +`design/quick-specs/[kebab-case-title]-[YYYY-MM-DD].md`?" + +Use today's date in the filename. The title should be a kebab-case description +of the change (e.g., `jump-height-tuning-2026-03-10`, +`parry-window-addition-2026-03-10`). + +If yes, create the `design/quick-specs/` directory if it does not exist, then +write the file. + +If a GDD update is required (flagged in the spec), ask separately after +writing the quick spec: + +"This spec modifies rules in [System Name]. May I update +`design/gdd/[filename].md` — specifically the [section name] section?" + +Show the exact text that would be changed (old vs. new) before asking. Do not +make GDD edits without explicit approval. + +--- + +## 5. Handoff + +After writing the file, output: + +``` +Quick Design Spec written to: design/quick-specs/[filename].md +Type: [Tuning / Tweak / Addition / New Small System] +System: [system name] +GDD update: [Required — pending approval / Applied / Not required] + +Next step: This spec is ready for `/story-readiness` validation before +implementation. Reference this spec in the story's GDD Reference field. +``` + +### Pipeline Notes + +Verdict: **COMPLETE** — quick design spec written and ready for implementation. + +Quick Design Specs **bypass** `/design-review` and `/review-all-gdds` by +design. They are for small, low-risk, well-scoped changes where the cost of +the full review pipeline exceeds the risk of the change itself. 
+ +Redirect to the full pipeline if any of the following are true: +- The change adds a new system that belongs in the systems index +- The change significantly alters cross-system behavior or a system's + contracts with other systems +- The change introduces new player-facing mechanics that affect the + game's MDA aesthetic balance +- Implementation is likely to exceed one week of work + +In those cases: "This change has grown beyond quick-spec scope. I recommend +using `/design-system` to author a full GDD for this." + +--- + +## Recommended Next Steps + +- Run `/story-readiness [story-path]` to validate the story before implementation begins — reference this spec in the story's GDD Reference field +- Run `/dev-story [story-path]` to implement once the story passes readiness checks +- If the change is larger than expected, run `/design-system [system-name]` to author a full GDD instead diff --git a/.omc/skills/regression-suite/SKILL.md b/.omc/skills/regression-suite/SKILL.md new file mode 100644 index 0000000..376d2d0 --- /dev/null +++ b/.omc/skills/regression-suite/SKILL.md @@ -0,0 +1,250 @@ +--- +name: regression-suite +description: "Map test coverage to GDD critical paths, identify fixed bugs without regression tests, flag coverage drift from new features, and maintain tests/regression-suite.md. Run after implementing a bug fix or before a release gate." +argument-hint: "[update | audit | report]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit +--- + +# Regression Suite + +This skill ensures that every bug fix is backed by a test that would have +caught the original bug — and that the regression suite stays current as the +game evolves. It also detects when new features have been added without +corresponding regression coverage. + +A regression suite is not a new test category — it is a **curated list of +tests already in `tests/`** that collectively cover the game's critical paths +and known failure points. This skill maintains that list. 
+ +**Output:** `tests/regression-suite.md` + +**When to run:** +- After fixing a bug (confirm a regression test was written or identify gap) +- Before a release gate (`/gate-check polish` requires regression suite exists) +- As part of sprint close to detect coverage drift + +--- + +## 1. Parse Arguments + +**Modes:** +- `/regression-suite update` — scan new bug fixes this sprint and check + for regression test presence; add new tests to the suite manifest +- `/regression-suite audit` — full audit of all GDD critical paths vs. + existing test coverage; flag paths with no regression test +- `/regression-suite report` — read-only status report (no writes); suitable + for sprint reviews +- No argument — run `update` if a sprint is active, else `audit` + +--- + +## 2. Load Context + +### Step 2a — Load existing regression suite + +Read `tests/regression-suite.md` if it exists. Extract: +- Total registered regression tests +- Last updated date +- Any tests flagged as `STALE` or `QUARANTINED` + +If it does not exist: note "No regression suite found — will create one." + +### Step 2b — Load test inventory + +Glob all test files: +``` +tests/unit/**/*_test.* +tests/integration/**/*_test.* +tests/regression/**/* +``` + +For each file, note the system (from directory path) and file name. +Do not read test file contents unless needed for name-to-test mapping. + +### Step 2c — Load GDD critical paths + +For `audit` mode: read `design/gdd/systems-index.md` to get all systems. +For each MVP-tier system, read its GDD and extract: +- Acceptance Criteria (these define the critical paths) +- Formulas section (formulas must have regression tests) +- Edge Cases section (known edge cases should have regression tests) + +For `update` mode: skip full GDD scan. Instead read the current sprint plan +and story files to find stories with Status: Complete this sprint. 
+ +### Step 2d — Load closed bugs + +Glob `production/qa/bugs/*.md` and filter for bugs with a `Status: Closed` +or `Status: Fixed` field. Note: +- Which story or system the bug was in +- Whether a regression test was mentioned in the fix description + +--- + +## 3. Map Coverage — Critical Paths + +For `audit` mode only: + +For each GDD acceptance criterion, determine whether a test exists: + +1. Grep `tests/unit/[system]/` and `tests/integration/[system]/` for file names + and function names related to the criterion's key noun/verb +2. Assign coverage: + +| Status | Meaning | +|--------|---------| +| **COVERED** | A test file exists that targets this criterion's logic | +| **PARTIAL** | A test exists but doesn't cover all cases (e.g. happy path only) | +| **MISSING** | No test found for this critical path | +| **EXEMPT** | Visual/Feel or UI criterion — not automatable by design | + +3. Elevate MISSING items that correspond to formulas or state machines to + **HIGH PRIORITY** gap — these are the most likely regression sources. + +--- + +## 4. Map Coverage — Fixed Bugs + +For each closed bug: + +1. Extract the system slug from the bug's metadata +2. Grep `tests/unit/[system]/` and `tests/integration/[system]/` for a test + that references the bug ID or the specific failure scenario +3. Assign: + - **HAS REGRESSION TEST** — a test was found that would catch this bug + - **MISSING REGRESSION TEST** — bug was fixed but no test guards against recurrence + +For MISSING REGRESSION TEST items: +- Flag them as regression gaps +- Suggest the test file path: `tests/unit/[system]/[bug-slug]_regression_test.[ext]` +- Note: "Without this test, this bug can silently return in a future sprint." + +--- + +## 5. Detect Coverage Drift + +Coverage drift occurs when the game grows but the regression suite doesn't. 
+ +Check for drift indicators: +- Stories completed this sprint with no corresponding test files in `tests/` +- New systems added to `systems-index.md` since the last regression-suite update +- GDD sections added or revised since the regression suite was last updated + (use Grep on GDD file modification hints if available, or ask the user) +- `tests/regression-suite.md` last-updated date vs. current date — if gap > + 2 sprints, flag as likely stale + +--- + +## 6. Generate Report and Suite Manifest + +### Report format (in conversation) + +``` +## Regression Suite Status + +**Mode**: [update | audit | report] +**Existing registered tests**: [N] +**Test files scanned**: [N] + +### Critical Path Coverage (audit mode only) +| System | Total ACs | Covered | Partial | Missing | Exempt | +|--------|-----------|---------|---------|---------|--------| +| [name] | [N] | [N] | [N] | [N] | [N] | + +**Coverage rate (non-exempt)**: [N]% + +### Bug Regression Coverage +| Bug ID | System | Severity | Has Regression Test? 
| +|--------|--------|----------|----------------------| +| BUG-NNN | [system] | S[N] | YES / NO ⚠ | + +**Bugs without regression tests**: [N] + +### Coverage Drift Indicators +[List new systems or stories with no test coverage, or "None detected."] + +### Recommended New Regression Tests +| Priority | System | Suggested Test File | Covers | +|----------|--------|---------------------|--------| +| HIGH | [system] | `tests/unit/[system]/[slug]_regression_test.[ext]` | BUG-NNN / AC-[N] | +| MEDIUM | [system] | `tests/unit/[system]/[slug]_test.[ext]` | [criterion] | +``` + +### Suite manifest format (`tests/regression-suite.md`) + +The manifest is a curated index — not the tests themselves, but a registry +of which tests should always pass before a release: + +```markdown +# Regression Suite Manifest + +> Last Updated: [date] +> Total registered tests: [N] +> Coverage: [N]% of GDD critical paths + +## How to run + +[Engine-specific command to run all regression tests] + +## Registered Regression Tests + +### [System Name] + +| Test File | Test Function (if known) | Covers | Added | +|-----------|--------------------------|--------|-------| +| `tests/unit/[system]/[file]_test.[ext]` | `test_[scenario]` | AC-N / BUG-NNN | [date] | + +## Known Gaps + +Tests that should exist but don't yet: + +| Priority | System | Suggested Path | Covers | Reason Not Yet Written | +|----------|--------|----------------|--------|------------------------| +| HIGH | [system] | `tests/unit/[system]/[path]` | BUG-NNN | Bug fixed without test | + +## Quarantined Tests + +Tests that are flaky or disabled (do not run in CI): + +| Test File | Function | Reason | Quarantined Since | +|-----------|----------|--------|-------------------| +| (none) | | | | +``` + +--- + +## 7. Write Output + +Ask: "May I write/update `tests/regression-suite.md` with the current +regression suite manifest?" + +For `update` mode: append new entries; never remove existing entries +(use `Edit` with targeted insertions). 
+For `audit` mode: rewrite the full manifest with updated coverage data. +For `report` mode: do not write anything. + +After writing (if approved): + +- For each HIGH priority gap: "Consider creating the missing regression test + before the next sprint. Run `/test-helpers` to scaffold the test file." +- If bug regression gaps > 0: "These bugs can silently return without regression + tests. The next sprint should include a story to write the missing tests." +- If coverage drift detected: "Regression suite may be drifting. Consider + running `/regression-suite audit` at the next sprint boundary." + +Verdict: **COMPLETE** — regression suite updated. (If user declined write: Verdict: **BLOCKED**.) + +--- + +## Collaborative Protocol + +- **Never remove existing regression tests from the manifest** without + explicit user approval — removing a test that was deliberately written is a + regression risk itself +- **Gaps are advisory, not blocking** — surface them clearly but do not prevent + other work from proceeding (except at release gate where regression suite is required) +- **Quarantine is not deletion** — tests with intermittent failures should be + quarantined (noted in manifest) but not removed; they should be fixed by + `/test-flakiness` +- **Ask before writing** — always confirm before creating or updating the manifest diff --git a/.omc/skills/release-checklist/SKILL.md b/.omc/skills/release-checklist/SKILL.md new file mode 100644 index 0000000..8415a28 --- /dev/null +++ b/.omc/skills/release-checklist/SKILL.md @@ -0,0 +1,181 @@ +--- +name: release-checklist +description: "Generates a comprehensive pre-release validation checklist covering build verification, certification requirements, store metadata, and launch readiness." 
+argument-hint: "[platform: pc|console|mobile|all]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +--- + +> **Explicit invocation only**: This skill should only run when the user explicitly requests it with `/release-checklist`. Do not auto-invoke based on context matching. + +## Phase 1: Parse Arguments + +Read the argument for the target platform (`pc`, `console`, `mobile`, or `all`). If no platform is specified, default to `all`. + +--- + +## Phase 2: Load Project Context + +- Read `CLAUDE.md` for project context, version information, and platform targets. +- Read the current milestone from `production/milestones/` to understand what features and content should be included in this release. + +--- + +## Phase 3: Scan Codebase + +Scan for outstanding issues: + +- Count `TODO` comments +- Count `FIXME` comments +- Count `HACK` comments +- Note their locations and severity + +Check for test results in any test output directories or CI logs if available. + +--- + +## Phase 4: Generate the Release Checklist + +```markdown +## Release Checklist: [Version] -- [Platform] +Generated: [Date] + +### Codebase Health +- TODO count: [N] ([list top 5 if many]) +- FIXME count: [N] ([list all -- these are potential blockers]) +- HACK count: [N] ([list all -- these need review]) + +### Build Verification +- [ ] Clean build succeeds on all target platforms +- [ ] No compiler warnings (zero-warning policy) +- [ ] All assets included and loading correctly +- [ ] Build size within budget ([target size]) +- [ ] Build version number correctly set ([version]) +- [ ] Build is reproducible from tagged commit + +### Quality Gates +- [ ] Zero S1 (Critical) bugs +- [ ] Zero S2 (Major) bugs -- or documented exceptions with producer approval +- [ ] All critical path features tested and signed off by QA +- [ ] Performance within budgets: + - [ ] Target FPS met on minimum spec hardware + - [ ] Memory usage within budget + - [ ] Load times within budget + - [ ] No memory leaks over 
extended play sessions +- [ ] No regression from previous build +- [ ] Soak test passed (4+ hours continuous play) + +### Content Complete +- [ ] All placeholder assets replaced with final versions +- [ ] All TODO/FIXME in content files resolved or documented +- [ ] All player-facing text proofread +- [ ] All text localization-ready (no hardcoded strings) +- [ ] Audio mix finalized and approved +- [ ] Credits complete and accurate +``` + +Add platform-specific sections based on the argument: + +**For `pc`:** +```markdown +### Platform Requirements: PC +- [ ] Minimum and recommended specs verified and documented +- [ ] Keyboard+mouse controls fully functional +- [ ] Controller support tested (Xbox, PlayStation, generic) +- [ ] Resolution scaling tested (1080p, 1440p, 4K, ultrawide) +- [ ] Windowed, borderless, and fullscreen modes working +- [ ] Graphics settings save and load correctly +- [ ] Steam/Epic/GOG SDK integrated and tested +- [ ] Achievements functional +- [ ] Cloud saves functional +- [ ] Steam Deck compatibility verified (if targeting) +``` + +**For `console`:** +```markdown +### Platform Requirements: Console +- [ ] TRC/TCR/Lotcheck requirements checklist complete +- [ ] Platform-specific controller prompts display correctly +- [ ] Suspend/resume works correctly +- [ ] User switching handled properly +- [ ] Network connectivity loss handled gracefully +- [ ] Storage full scenario handled +- [ ] Parental controls respected +- [ ] Platform-specific achievement/trophy integration tested +- [ ] First-party certification submission prepared +``` + +**For `mobile`:** +```markdown +### Platform Requirements: Mobile +- [ ] App store guidelines compliance verified +- [ ] All required device permissions justified and documented +- [ ] Privacy policy linked and accurate +- [ ] Data safety/nutrition labels completed +- [ ] Touch controls tested on multiple screen sizes +- [ ] Battery usage within acceptable range +- [ ] Background behavior correct (pause, resume, 
terminate) +- [ ] Push notification permissions handled correctly +- [ ] In-app purchase flow tested (if applicable) +- [ ] App size within store limits +``` + +**Store and launch sections (all platforms):** +```markdown +### Store / Distribution +- [ ] Store page metadata complete and proofread + - [ ] Short description + - [ ] Long description + - [ ] Feature list + - [ ] System requirements (PC) +- [ ] Screenshots up to date and per-platform resolution requirements met +- [ ] Trailers up to date +- [ ] Key art and capsule images current +- [ ] Age rating obtained and configured: + - [ ] ESRB + - [ ] PEGI + - [ ] Other regional ratings as required +- [ ] Legal notices, EULA, and privacy policy in place +- [ ] Third-party license attributions complete +- [ ] Pricing configured for all regions + +### Launch Readiness +- [ ] Analytics / telemetry verified and receiving data +- [ ] Crash reporting configured and dashboard accessible +- [ ] Day-one patch prepared and tested (if needed) +- [ ] On-call team schedule set for first 72 hours +- [ ] Community launch announcements drafted +- [ ] Press/influencer keys prepared for distribution +- [ ] Support team briefed on known issues and FAQ +- [ ] Rollback plan documented (if critical issues found post-launch) + +### Go / No-Go: [READY / NOT READY] + +**Rationale:** +[Summary of readiness assessment. List any blocking items that must be +resolved before launch. If NOT READY, list the specific items that need +resolution and estimated time to address them.] + +**Sign-offs Required:** +- [ ] QA Lead +- [ ] Technical Director +- [ ] Producer +- [ ] Creative Director +``` + +--- + +## Phase 5: Save Checklist + +Present the checklist to the user with: total checklist items, number of known blockers (FIXME/HACK counts, known bugs). + +Ask: "May I write this to `production/releases/release-checklist-[version].md`?" + +If yes, write the file, creating the directory if needed. 
+ +--- + +## Phase 6: Next Steps + +- Run `/gate-check` for a formal phase gate verdict before proceeding to release. +- Coordinate final sign-offs via `/team-release`. diff --git a/.omc/skills/retrospective/SKILL.md b/.omc/skills/retrospective/SKILL.md new file mode 100644 index 0000000..f404331 --- /dev/null +++ b/.omc/skills/retrospective/SKILL.md @@ -0,0 +1,210 @@ +--- +name: retrospective +description: "Generates a sprint or milestone retrospective by analyzing completed work, velocity, blockers, and patterns. Produces actionable insights for the next iteration." +argument-hint: "[sprint-N|milestone-name]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +context: | + !git log --oneline --since="2 weeks ago" 2>/dev/null +--- + +## Phase 1: Parse Arguments + +Determine whether this is a sprint retrospective (`sprint-N`) or a milestone retrospective (`milestone-name`). + +--- + +## Phase 1b: Check for Existing Retrospective + +Before loading any data, glob for an existing retrospective file: + +- For sprint retrospectives: `production/retrospectives/retro-[sprint-slug]-*.md` + (also check `production/sprints/sprint-[N]-retrospective.md` as an alternate location) +- For milestone retrospectives: `production/retrospectives/retro-[milestone-name]-*.md` + +If a matching file is found, present the user with: + +``` +An existing retrospective was found: [filename] + +[A] Update existing retrospective — load it and add/revise sections +[B] Start fresh — generate a new retrospective, archiving the old one +``` + +Wait for user selection before continuing. If updating, read the existing file and +carry its content forward into the generation phase, revising sections with new data. 
+ +--- + +## Phase 2: Load Sprint or Milestone Data + +Read the sprint or milestone plan from the appropriate location: + +- Sprint plans: `production/sprints/` +- Milestone definitions: `production/milestones/` + +**If the file does not exist or is empty**, output: + +> "No sprint data found for [sprint/milestone]. Run `/sprint-status` to generate +> sprint data first, or provide the sprint details manually." + +Then use `AskUserQuestion` to present two options: + +- **[A] Provide data manually** — ask the user to paste or describe the sprint + tasks, dates, and outcomes; use that as the source of truth for the retrospective. +- **[B] Stop** — abort the skill. Verdict: **BLOCKED** — no sprint data available. + +If the user chooses [A], collect the data and continue to Phase 3 using what they provide. +If the user chooses [B], stop here. + +Extract: planned tasks, estimated effort, owners, and goals. + +Read the git log for the period covered by the sprint or milestone to understand what was actually committed and when. + +--- + +## Phase 3: Analyze Completion and Trends + +Scan for completed and incomplete tasks by comparing the plan against actual deliverables. Check for: + +- Tasks completed as planned +- Tasks completed but modified from the plan +- Tasks carried over (not completed) +- Tasks added mid-sprint (unplanned work) +- Tasks removed or descoped + +Scan the codebase for TODO/FIXME trends: + +- Count current TODO/FIXME/HACK comments +- Compare to previous sprint counts if available (check previous retrospectives) +- Note whether technical debt is growing or shrinking + +Read previous retrospectives (if any) from `production/sprints/` or `production/milestones/` to check: + +- Were previous action items addressed? +- Are the same problems recurring? +- How has velocity trended? 
+ +--- + +## Phase 4: Generate the Retrospective + +```markdown +## Retrospective: [Sprint N / Milestone Name] +Period: [Start Date] -- [End Date] +Generated: [Date] + +### Metrics + +| Metric | Planned | Actual | Delta | +|--------|---------|--------|-------| +| Tasks | [X] | [Y] | [+/- Z] | +| Completion Rate | -- | [Z%] | -- | +| Story Points / Effort Days | [X] | [Y] | [+/- Z] | +| Bugs Found | -- | [N] | -- | +| Bugs Fixed | -- | [N] | -- | +| Unplanned Tasks Added | -- | [N] | -- | +| Commits | -- | [N] | -- | + +### Velocity Trend + +| Sprint | Planned | Completed | Rate | +|--------|---------|-----------|------| +| [N-2] | [X] | [Y] | [Z%] | +| [N-1] | [X] | [Y] | [Z%] | +| [N] (current) | [X] | [Y] | [Z%] | + +**Trend**: [Increasing / Stable / Decreasing] +[One sentence explaining the trend] + +### What Went Well +- [Observation backed by specific data or examples] +- [Another positive observation] +- [Recognize specific contributions or decisions that paid off] + +### What Went Poorly +- [Specific issue with measurable impact -- e.g., "Feature X took 5 days + instead of estimated 2, blocking tasks Y and Z"] +- [Another issue with impact] +- [Do not assign blame -- focus on systemic causes] + +### Blockers Encountered + +| Blocker | Duration | Resolution | Prevention | +|---------|----------|------------|------------| +| [What blocked progress] | [How long] | [How it was resolved] | [How to prevent recurrence] | + +### Estimation Accuracy + +| Task | Estimated | Actual | Variance | Likely Cause | +|------|-----------|--------|----------|--------------| +| [Most overestimated task] | [X] | [Y] | [+Z] | [Why] | +| [Most underestimated task] | [X] | [Y] | [-Z] | [Why] | + +**Overall estimation accuracy**: [X%] of tasks within +/- 20% of estimate + +[Analysis: Are we consistently over- or under-estimating? For which types of +tasks? What adjustment should we apply?] 
+ +### Carryover Analysis + +| Task | Original Sprint | Times Carried | Reason | Action | +|------|----------------|---------------|--------|--------| +| [Task that was not completed] | [Sprint N-X] | [N] | [Why] | [Complete / Descope / Redesign] | + +### Technical Debt Status +- Current TODO count: [N] (previous: [N]) +- Current FIXME count: [N] (previous: [N]) +- Current HACK count: [N] (previous: [N]) +- Trend: [Growing / Stable / Shrinking] +- [Note any areas of concern] + +### Previous Action Items Follow-Up + +| Action Item (from Sprint N-1) | Status | Notes | +|-------------------------------|--------|-------| +| [Previous action] | [Done / In Progress / Not Started] | [Context] | + +### Action Items for Next Iteration + +| # | Action | Owner | Priority | Deadline | +|---|--------|-------|----------|----------| +| 1 | [Specific, measurable action] | [Who] | [High/Med/Low] | [When] | +| 2 | [Another action] | [Who] | [Priority] | [When] | + +### Process Improvements +- [Specific change to how we work, with expected benefit] +- [Another improvement -- keep it to 2-3 actionable items, not a wish list] + +### Summary +[2-3 sentence overall assessment: Was this a good sprint/milestone? What is +the single most important thing to change going forward?] +``` + +--- + +## Phase 5: Save Retrospective + +Present the retrospective and top findings to the user (completion rate, velocity trend, top blocker, most important action item). + +Ask: "May I write this to `production/sprints/sprint-[N]-retrospective.md`?" (or the milestone path if applicable) + +If yes, write the file, creating the directory if needed. Verdict: **COMPLETE** — retrospective saved. + +If no, stop here. Verdict: **BLOCKED** — user declined write. + +--- + +## Phase 6: Next Steps + +- Run `/sprint-plan` to incorporate the action items and velocity data into the next sprint. +- If this was a milestone retrospective, run `/gate-check` to formally assess readiness for the next phase. 
+ +### Guidelines + +- Be honest and specific. Vague retrospectives ("communication could be better") produce vague improvements. Use data and examples. +- Focus on systemic issues, not individual blame. +- Limit action items to 3-5. More than that dilutes focus. +- Every action item must have an owner and a deadline. +- Check whether previous action items were completed. Recurring unaddressed items are a process smell. +- If this is a milestone retrospective, also evaluate whether the milestone goals were achieved and what that means for the overall project timeline. diff --git a/.omc/skills/reverse-document/SKILL.md b/.omc/skills/reverse-document/SKILL.md new file mode 100644 index 0000000..d73cc58 --- /dev/null +++ b/.omc/skills/reverse-document/SKILL.md @@ -0,0 +1,262 @@ +--- +name: reverse-document +description: "Generate design or architecture documents from existing implementation. Works backwards from code/prototypes to create missing planning docs." +argument-hint: "<type> <path> (e.g., 'design src/gameplay/combat' or 'architecture src/core')" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash +# Read-only diagnostic skill — no specialist agent delegation needed +--- + +# Reverse Documentation + +This skill analyzes existing implementation (code, prototypes, systems) and generates +appropriate design or architecture documentation. 
Use this when: +- You built a feature without writing a design doc first +- You inherited a codebase without documentation +- You prototyped a mechanic and need to formalize it +- You need to document "why" behind existing code + +--- + +## Workflow + +## Phase 1: Parse Arguments + +**Format**: `/reverse-document <type> <path>` + +**Type options**: +- `design` → Generate a game design document (GDD section) +- `architecture` → Generate an Architecture Decision Record (ADR) +- `concept` → Generate a concept document from prototype + +**Path**: Directory or file to analyze +- `src/gameplay/combat/` → All combat-related code +- `src/core/event-system.cpp` → Specific file +- `prototypes/stealth-mech/` → Prototype directory + +**Examples**: +```bash +/reverse-document design src/gameplay/magic-system +/reverse-document architecture src/core/entity-component +/reverse-document concept prototypes/vehicle-combat +``` + +## Phase 2: Analyze Implementation + +**Read and understand the code/prototype**: + +**For design docs (GDD):** +- Identify mechanics, rules, formulas +- Extract gameplay values (damage, cooldowns, ranges) +- Find state machines, ability systems, progression +- Detect edge cases handled in code +- Map dependencies (what systems interact?) + +**For architecture docs (ADR):** +- Identify patterns (ECS, singleton, observer, etc.) +- Understand technical decisions (threading, serialization, etc.) +- Map dependencies and coupling +- Assess performance characteristics +- Find constraints and trade-offs + +**For concept docs (prototype analysis):** +- Identify core mechanic +- Extract emergent gameplay patterns +- Note what worked vs what didn't +- Find technical feasibility insights +- Document player fantasy / feel + +## Phase 3: Ask Clarifying Questions + +**DO NOT** just describe the code. **ASK** about intent: + +**Design questions**: +- "I see a [resource] system that depletes during [activity]. Was this for: + - Pacing (prevent spam)? 
+ - Resource management (strategic depth)? + - Or something else?" +- "The [mechanic] seems central. Is this a core pillar, or supporting feature?" +- "[Value] scales exponentially with [factor]. Intentional design, or needs rebalancing?" + +**Architecture questions**: +- "You're using a service locator pattern. Was this chosen for: + - Testability (mock dependencies)? + - Decoupling (reduce hard references)? + - Or inherited from existing code?" +- "I see manual memory management instead of smart pointers. Performance requirement, or legacy?" + +**Concept questions**: +- "The prototype emphasizes stealth over combat. Is that the intended pillar?" +- "Players seem to exploit the grappling hook for speed. Feature or bug?" + +## Phase 4: Present Findings + +Before drafting, show what you discovered: + +``` +I've analyzed [path]/. Here's what I found: + +MECHANICS IMPLEMENTED: +- [mechanic-a] with [property] (e.g. timing windows, cooldowns) +- [mechanic-b] (e.g. interaction between two states) +- [resource] system (depletes on [action], regens on [condition]) +- [state] system (builds up, triggers [effect]) + +FORMULAS DISCOVERED: +- [Output] = [formula using discovered variables] +- [Secondary output] = [formula] + +UNCLEAR INTENT AREAS: +1. [Resource] system — pacing or resource management? +2. [Mechanic] — core pillar or supporting feature? +3. [Value] scaling — intentional design or needs tuning? + +Before I draft the design doc, could you clarify these points? +``` + +Wait for user to clarify intent before drafting. 
+ +## Phase 5: Draft Document Using Template + +Based on type, use appropriate template: + +| Type | Template | Output Path | +|------|----------|-------------| +| `design` | `templates/design-doc-from-implementation.md` | `design/gdd/[system-name].md` | +| `architecture` | `templates/architecture-doc-from-code.md` | `docs/architecture/[decision-name].md` | +| `concept` | `templates/concept-doc-from-prototype.md` | `prototypes/[name]/CONCEPT.md` or `design/concepts/[name].md` | + +**Draft structure**: +- Capture **what exists** (mechanics, patterns, implementation) +- Document **why it exists** (intent clarified with user) +- Identify **what's missing** (edge cases not handled, gaps in design) +- Flag **follow-up work** (balance tuning, missing features) + +## Phase 6: Show Draft and Request Approval + +**Collaborative protocol**: +``` +I've drafted the [system-name] design doc based on your code and clarifications. + +[Show key sections: Overview, Mechanics, Formulas, Design Intent] + +ADDITIONS I MADE: +- Documented [mechanic] as "[intent]" per your clarification +- Added edge cases not in code (e.g., what if [resource] hits 0 mid-[action]?) +- Flagged balance concern: [scaling type] scaling at [boundary condition] + +SECTIONS MARKED AS INCOMPLETE: +- "[System] interaction with [other-system]" (not fully implemented yet) +- "[Variant or feature]" (only [subset] implemented so far) + +May I write this to design/gdd/[system-name].md? +``` + +Wait for approval. User may request changes before writing. + +## Phase 7: Write Document with Metadata + +When approved, write the file with special markers: + +```markdown +--- +status: reverse-documented +source: [path/] +date: [today] +verified-by: [User name] +--- + +# [System Name] Design + +> **Note**: This document was reverse-engineered from the existing implementation. +> It captures current behavior and clarified design intent. Some sections may be +> incomplete where implementation is partial or intent was unclear. 
+ +[Rest of document...] +``` + +## Phase 8: Flag Follow-Up Work + +After writing, suggest next steps: + +``` +✅ Written to design/gdd/combat-system.md + +FOLLOW-UP RECOMMENDED: +1. Run /balance-check on combat formulas (exponential scaling concern) +2. Create ADR for stamina system architecture decision +3. Implement missing edge cases: + - Stamina depletion mid-combo behavior + - Stagger state during dodge +4. Extend design doc when weapon variety is implemented + +Would you like me to tackle any of these now? +``` + +--- + +## Template Selection Logic + +| If analyzing... | Use template... | Because... | +|----------------|-----------------|------------| +| `src/gameplay/*` | design-doc-from-implementation.md | Gameplay mechanics → GDD | +| `src/core/*`, `src/ai/*` | architecture-doc-from-code.md | Core systems → ADR | +| `prototypes/*` | concept-doc-from-prototype.md | Experiments → concept doc | +| `src/networking/*` | architecture-doc-from-code.md | Technical systems → ADR | +| `src/ui/*` | design-doc-from-implementation.md | UI/UX → design spec | + +--- + +## Example Session: Reverse-Document a System + +``` +User: /reverse-document design src/gameplay/[system] + +Agent: I'll analyze your [system] implementation to create a design doc. + [Reads code, discovers mechanics, formulas] + +Agent: I've analyzed src/gameplay/[system]/. Here's what I found: + [Shows mechanics, formulas, unclear areas] + + Before drafting, could you clarify: + 1. [Resource] system intent? + 2. Is [mechanic] a core pillar? + 3. [Value] scaling — intentional or needs tuning? + +User: 1. [Resource] is for pacing, prevent [unwanted behavior] + 2. [Mechanic] is core — I want [design intent] + 3. Yeah, exponential is too much, should be linear + +Agent: Perfect, that clarifies the vision. Let me draft the design doc. + [Shows draft with clarified intent] + + May I write this to design/gdd/[system-name].md? + +User: Yes, write it. 
+ +Agent: ✅ Written to design/gdd/[system-name].md + ✅ Marked as [REVERSE-DOCUMENTED] + ✅ Flagged [value] scaling for rebalancing + + Next steps: + - Update [formula] to [corrected scaling] + - Run /balance-check to validate [curve] + - Document [mechanic] as core pillar in game-pillars.md +``` + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle: + +1. **Analyze First**: Read code, understand implementation +2. **Question Intent**: Ask about "why", not just "what" +3. **Present Findings**: Show discoveries, highlight unclear areas +4. **User Clarifies**: Separate intent from accidents +5. **Draft Document**: Create doc based on reality + intent +6. **Show Draft**: Display key sections, explain additions +7. **Get Approval**: "May I write to [filepath]?" On approval: Verdict: **COMPLETE** — document generated. On decline: Verdict: **BLOCKED** — user declined write. +8. **Flag Follow-Up**: Suggest related work, don't auto-execute + +**Never assume intent. Always ask before documenting "why".** diff --git a/.omc/skills/review-all-gdds/SKILL.md b/.omc/skills/review-all-gdds/SKILL.md new file mode 100644 index 0000000..dd09d62 --- /dev/null +++ b/.omc/skills/review-all-gdds/SKILL.md @@ -0,0 +1,628 @@ +--- +name: review-all-gdds +description: "Holistic cross-GDD consistency and game design review. Reads all system GDDs simultaneously and checks for contradictions between them, stale references, ownership conflicts, formula incompatibilities, and game design theory violations (dominant strategies, economic imbalance, cognitive overload, pillar drift). Run after all MVP GDDs are written, before architecture begins." 
+argument-hint: "[focus: full | consistency | design-theory | since-last-review]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash, AskUserQuestion, Task +model: opus +--- + +# Review All GDDs + +This skill reads every system GDD simultaneously and performs two complementary +reviews that cannot be done per-GDD in isolation: + +1. **Cross-GDD Consistency** — contradictions, stale references, and ownership + conflicts between documents +2. **Game Design Holism** — issues that only emerge when you see all systems + together: dominant strategies, broken economies, cognitive overload, pillar + drift, competing progression loops + +**This is distinct from `/design-review`**, which reviews one GDD for internal +completeness. This skill reviews the *relationships* between all GDDs. + +**When to run:** +- After all MVP-tier GDDs are individually approved +- After any GDD is significantly revised mid-production +- Before `/create-architecture` begins (architecture built on inconsistent GDDs + inherits those inconsistencies) + +**Argument modes:** + +**Focus:** `$ARGUMENTS[0]` (blank = `full`) + +- **No argument / `full`**: Both consistency and design theory passes +- **`consistency`**: Cross-GDD consistency checks only (faster) +- **`design-theory`**: Game design holism checks only +- **`since-last-review`**: Only GDDs modified since the last review report (git-based) + +--- + +## Phase 1: Load Everything + +### Phase 1a — L0: Summary Scan (fast, low tokens) + +Before reading any full document, use Grep to extract `## Summary` sections +from all GDD files: + +``` +Grep pattern="## Summary" glob="design/gdd/*.md" output_mode="content" -A 5 +``` + +Display a manifest to the user: +``` +Found [N] GDDs. Summaries: + • combat.md — [summary text] + • inventory.md — [summary text] + ... +``` + +For `since-last-review` mode: run `git log --name-only` to identify GDDs +modified since the last review report file was written. 
Show the user which +GDDs are in scope based on summaries before doing any full reads. Only +proceed to L1 for those GDDs plus any GDDs listed in their "Key deps". + +### Phase 1b — Registry Pre-Load (fast baseline) + +Before full-reading any GDD, check for the entity registry: + +``` +Read path="design/registry/entities.yaml" +``` + +If the registry exists and has entries, use it as a **pre-built conflict +baseline**: known entities, items, formulas, and constants with their +authoritative values and source GDDs. In Phase 2, grep GDDs for registered +names first — this is faster than reading all GDDs in full before knowing +what to look for. + +If the registry is empty or absent: proceed without it. Note in the report: +"Entity registry is empty — consistency checks rely on full GDD reads only. +Run `/consistency-check` after this review to populate the registry." + +### Phase 1c — L1/L2: Full Document Load + +Full-read the in-scope documents: + +1. `design/gdd/game-concept.md` — game vision, core loop, MVP definition +2. `design/gdd/game-pillars.md` if it exists — design pillars and anti-pillars +3. `design/gdd/systems-index.md` — authoritative system list, layers, dependencies, status +4. **Every in-scope system GDD in `design/gdd/`** — read completely (skip + game-concept.md and systems-index.md — those are read above) + +Report: "Loaded [N] system GDDs covering [M] systems. Pillars: [list]. Anti-pillars: [list]." + +If fewer than 2 system GDDs exist, stop: +> "Cross-GDD review requires at least 2 system GDDs. Write more GDDs first, +> then re-run `/review-all-gdds`." + +--- + +### Parallel Execution + +Phase 2 (Consistency) and Phase 3 (Design Theory) are independent — they read +the same GDD inputs but produce separate reports. Spawn both as parallel Task +agents simultaneously rather than waiting for Phase 2 to complete before +starting Phase 3. Collect both results before writing the combined report. 
+ +--- + +## Phase 2: Cross-GDD Consistency + +Work through every pair and group of GDDs to find contradictions and gaps. + +### 2a: Dependency Bidirectionality + +For every GDD's Dependencies section, check that every listed dependency is +reciprocal: +- If GDD-A lists "depends on GDD-B", check that GDD-B lists GDD-A as a dependent +- If GDD-A lists "depended on by GDD-C", check that GDD-C lists GDD-A as a dependency +- Flag any one-directional dependency as a consistency issue + +``` +⚠️ Dependency Asymmetry +[system-a].md lists: Depends On → [system-b].md +[system-b].md does NOT list [system-a].md as a dependent +→ One of these documents has a stale dependency section +``` + +### 2b: Rule Contradictions + +For each game rule, mechanic, or constraint defined in any GDD, check whether +any other GDD defines a contradicting rule for the same situation: + +Categories to scan: +- **Floor/ceiling rules**: Does any GDD define a minimum value for an output? Does any other say a different system can bypass that floor? These contradict. +- **Resource ownership**: If two GDDs both define how a shared resource accumulates or depletes, do they agree? +- **State transitions**: If GDD-A describes what happens when a character dies, + does GDD-B's description of the same event agree? +- **Timing**: If GDD-A says "X happens on the same frame", does GDD-B assume + it happens asynchronously? +- **Stacking rules**: If GDD-A says status effects stack, does GDD-B assume + they don't? + +``` +🔴 Rule Contradiction +[system-a].md: "Minimum [output] after reduction is [floor_value]" +[system-b].md: "[mechanic] bypasses [system-a]'s rules and can reduce [output] to 0" +→ These rules directly contradict. Which GDD is authoritative? 
+``` + +### 2c: Stale References + +For every cross-document reference (GDD-A mentions a mechanic, value, or +system name from GDD-B), verify the referenced element still exists in GDD-B +with the same name and behaviour: + +- If GDD-A says "combo multiplier from the combat system feeds into score", check + that the combat GDD actually defines a combo multiplier that outputs to score +- If GDD-A references "the progression curve defined in [system].md", check that + [system].md actually has that curve, not a different progression model +- If GDD-A was written before GDD-B and assumed a mechanic that GDD-B later + designed differently, flag GDD-A as containing a stale reference + +``` +⚠️ Stale Reference +inventory.md (written first): "Item weight uses the encumbrance formula + from movement.md" +movement.md (written later): Defines no encumbrance formula — uses a flat + carry limit instead +→ inventory.md references a formula that doesn't exist +``` + +### 2d: Data and Tuning Knob Ownership Conflicts + +Two GDDs should not both claim to own the same data or tuning knob. Scan all +Tuning Knobs sections across all GDDs and flag duplicates: + +``` +⚠️ Ownership Conflict +[system-a].md Tuning Knobs: "[multiplier_name] — controls [output] scaling" +[system-b].md Tuning Knobs: "[multiplier_name] — scales [output] with [factor]" +→ Two GDDs define multipliers on the same output. Which owns the final value? + This will produce either a double-application bug or a design conflict. +``` + +### 2e: Formula Compatibility + +For GDDs whose formulas are connected (output of one feeds input of another), +check that the output range of the upstream formula is within the expected +input range of the downstream formula: + +- If [system-a].md outputs values between [min]–[max], and [system-b].md is + designed to receive values between [min2]–[max2], is the mismatch intentional? 
+- If an economy GDD expects resource acquisition in range X, and the + progression GDD generates it at range Y, the economy will be trivial or + inaccessible — is that intended? + +Flag incompatibilities as CONCERNS (design judgment needed, not necessarily wrong): + +``` +⚠️ Formula Range Mismatch +[system-a].md: Max [output] = [value_a] (at max [condition]) +[system-b].md: Base [input] = [value_b], max [input] = [value_c] +→ Late-[stage] [scenario] can resolve in a single [event]. + Is this intentional? If not, either [system-a]'s ceiling or [system-b]'s ceiling needs adjustment. +``` + +### 2f: Acceptance Criteria Cross-Check + +Scan Acceptance Criteria sections across all GDDs for contradictions: + +- GDD-A criteria: "Player cannot die from a single hit" +- GDD-B criteria: "Boss attack deals 150% of player max health" +These acceptance criteria cannot both pass simultaneously. + +--- + +## Phase 3: Game Design Holism + +Review all GDDs together through the lens of game design theory and player +psychology. These are issues that individual GDD reviews cannot catch because +they require seeing all systems at once. + +### 3a: Progression Loop Competition + +A game should have one dominant progression loop that players feel is "the +point" of the game, with supporting loops that feed into it. When multiple +systems compete equally as the primary progression driver, players don't know +what the game is about. 
+ +Scan all GDDs for systems that: +- Award the player's primary resource (XP, levels, prestige, unlocks) +- Define themselves as the "core" or "main" loop +- Have comparable depth and time investment to other systems doing the same + +``` +⚠️ Competing Progression Loops +combat.md: Awards XP, unlocks abilities, is described as "the core loop" +crafting.md: Awards XP, unlocks recipes, is described as "the primary activity" +exploration.md: Awards XP, unlocks map areas, described as "the main driver" +→ Three systems all claim to be the primary progression loop and all award + the same primary currency. Players will optimise one and ignore the others. + Consider: one primary loop with the others as support systems. +``` + +### 3b: Player Attention Budget + +Count how many systems require active player attention simultaneously during +a typical session. Each actively-managed system costs attention: + +- Active = player must make decisions about this system regularly during play +- Passive = system runs automatically, player sees results but doesn't manage it + +More than 3-4 simultaneously active systems creates cognitive overload for most +players. Present the count and flag if it exceeds 4 concurrent active systems: + +``` +⚠️ Cognitive Load Risk +Simultaneously active systems during [core loop moment]: + 1. [system-a].md — [decision type] (active) + 2. [system-b].md — [resource management] (active) + 3. [system-c].md — [tracking] (active) + 4. [system-d].md — [item/action use] (active) + 5. [system-e].md — [cooldown/timer management] (active) + 6. [system-f].md — [coordination decisions] (active) +→ 6 simultaneously active systems during the core loop. + Research suggests 3-4 is the comfortable limit for most players. + Consider: which of these can be made passive or simplified? +``` + +### 3c: Dominant Strategy Detection + +A dominant strategy makes other strategies irrelevant — players discover it, +use it exclusively, and find the rest of the game boring. 
Look for:
+
+- **Resource monopolies**: One strategy generates a resource significantly
+ faster than all others
+- **Risk-free power**: A strategy that is both high-reward and low-risk
+ (if high-risk strategies exist, they need proportionally higher reward)
+- **No trade-offs**: An option that is superior in all dimensions to all others
+- **Obvious optimal path**: If any progression choice is "clearly correct",
+ the others aren't real choices
+
+```
+⚠️ Potential Dominant Strategy
+combat.md: Ranged attacks deal 80% of melee damage with no risk
+combat.md: Melee attacks deal 100% damage but require close range
+→ Unless melee has a significant compensating advantage (AOE, stagger,
+ resource regeneration), ranged is dominant — higher safety, only 20% less
+ damage. Consider what melee offers that ranged cannot.
+```
+
+### 3d: Economic Loop Analysis
+
+Identify all resources across all GDDs (gold, XP, crafting materials, stamina,
+health, mana, etc.). For each resource, map its **sources** (how players gain
+it) and **sinks** (how players spend it).
+
+Flag dangerous economic conditions:
+
+| Condition | Sign | Risk |
+|-----------|------|------|
+| **Infinite source, no sink** | Resource accumulates indefinitely | Late game becomes trivially easy |
+| **Sink, no source** | Resource drains to zero | System becomes unavailable |
+| **Source >> Sink** | Surplus accumulates | Resource becomes meaningless |
+| **Sink >> Source** | Constant scarcity | Frustration and gatekeeping |
+| **Positive feedback loop** | More resource → easier to earn more | Runaway leader, snowball |
+| **No catch-up** | Falling behind accelerates deficit | Unrecoverable states |
+
+```
+🔴 Economic Imbalance: Infinite Source, No Sink
+gold economy:
+ Sources: monster drops (scales with player power), merchant selling (unlimited)
+ Sinks: equipment purchase (one-time), ability upgrades (finite count)
+→ After equipment and abilities are purchased, gold has no sink.
+ Infinite surplus.
Gold becomes meaningless mid-game. + Add ongoing gold sinks (upkeep, consumables, cosmetics, gambling). +``` + +### 3e: Difficulty Curve Consistency + +When multiple systems scale with player progression, they must scale in +compatible directions and at compatible rates. Mismatched scaling curves +create unintended difficulty spikes or trivialisations. + +For each system that scales over time, extract: +- What scales (enemy health, player damage, resource cost, area size) +- How it scales (linear, exponential, stepped) +- When it scales (level, time, area) + +Compare all scaling curves. Flag mismatches: + +``` +⚠️ Difficulty Curve Mismatch +combat.md: Enemy health scales exponentially with area (×2 per area) +progression.md: Player damage scales linearly with level (+10% per level) +→ By area 5, enemies have 32× base health; player deals ~1.5× base damage. + The gap widens indefinitely. Late areas will become inaccessibly difficult + unless the curves are reconciled. +``` + +### 3f: Pillar Alignment + +Every system should clearly serve at least one design pillar. A system that +serves no pillar is "scope creep by design" — it's in the game but not in +service of what the game is trying to be. + +For each GDD system, check its Player Fantasy section against the design pillars. +Flag any system whose stated fantasy doesn't map to any pillar: + +``` +⚠️ Pillar Drift +fishing-system.md: Player Fantasy — "peaceful, meditative activity" +Pillars: "Brutal Combat", "Tense Survival", "Emergent Stories" +→ The fishing system serves none of the three pillars. Either add a pillar + that covers it, redesign it to serve an existing pillar, or cut it. 
+``` + +Also check anti-pillars — flag any system that does what an anti-pillar +explicitly says the game will NOT do: + +``` +🔴 Anti-Pillar Violation +Anti-Pillar: "We will NOT have linear story progression — player defines their path" +main-quest.md: Defines a 12-chapter linear story with mandatory sequence +→ This system directly violates the defined anti-pillar. +``` + +### 3g: Player Fantasy Coherence + +The player fantasies across all systems should be compatible — they should +reinforce a consistent identity for what the player IS in this game. Conflicting +player fantasies create identity confusion. + +``` +⚠️ Player Fantasy Conflict +combat.md: "You are a ruthless, precise warrior — every kill is earned" +dialogue.md: "You are a charismatic diplomat — violence is always avoidable" +exploration.md: "You are a reckless adventurer — diving in without a plan" +→ Three systems present incompatible identities. Players will feel the game + doesn't know what it wants them to be. Consider: do these fantasies serve + the same core identity from different angles, or do they genuinely conflict? +``` + +--- + +## Phase 4: Cross-System Scenario Walkthrough + +Walk through the game from the player's perspective to find problems that only +appear at the interaction boundary between multiple systems — things static +analysis of individual GDDs cannot surface. + +### 4a: Identify Key Multi-System Moments + +Scan all GDDs and identify the 3–5 most important player-facing moments where +multiple systems activate simultaneously. 
Look specifically for: + +- **Combat + Economy overlap**: killing enemies that drop resources, spending + resources during combat, death/respawn interacting with economy state +- **Progression + Difficulty overlap**: level-up triggering mid-fight, ability + unlocks changing combat viability, difficulty scaling at progression milestones +- **Narrative + Gameplay overlap**: dialogue choices locking/unlocking mechanics, + story beats interrupting resource loops, quest completion triggering system + state changes +- **3+ system chains**: any player action that triggers System A, which feeds + into System B, which triggers System C (these are highest-risk interaction paths) + +List each identified scenario with a one-line description before proceeding. + +### 4b: Walk Through Each Scenario + +For each scenario, step through the sequence explicitly: + +1. **Trigger** — what player action or game event starts this? +2. **Activation order** — which systems activate, in what sequence? +3. **Data flow** — what does each system output, and is that output a valid + input for the next system in the chain? +4. **Player experience** — what does the player see, hear, or feel at each step? +5. **Failure modes** — are there any of the following? 
+ - **Race conditions**: two systems trying to modify the same state simultaneously + - **Feedback loops**: System A amplifies System B which re-amplifies System A + with no cap or dampener + - **Broken state transitions**: a system assumes a state that a previous + system may have changed (e.g., "player is alive" assumption after a combat + step that could have caused death) + - **Contradictory messaging**: player receives conflicting feedback from two + systems reacting to the same event (e.g., "success" sound + "failure" UI) + - **Compounding difficulty spikes**: two systems both scaling up at the same + progression point, multiplying the intended difficulty increase + - **Reward conflicts**: two systems both reacting to the same trigger with + rewards that together exceed the intended value (double-dipping) + - **Undefined behavior**: the GDDs don't specify what happens in this combined + state (neither system's rules cover it) + +``` +Example walkthrough: +Scenario: Player kills elite enemy at level-up threshold during active quest + +Trigger: Player lands killing blow on elite enemy +→ combat.md: awards kill XP (100 pts) +→ progression.md: XP total crosses level threshold → triggers level-up + Output: new level, stat increases, ability unlock popup +→ quest.md: kill-count criterion met → triggers quest completion event + Output: quest reward XP (500 pts), completion fanfare +→ progression.md (again): quest XP added → triggers SECOND level-up in same frame + ⚠️ Data flow issue: quest.md awards XP without checking if a level-up + is already in progress. progression.md has no guard against concurrent + level-up events. Undefined behavior: does the player level up once or twice? + Does the ability popup fire twice? Does the second level use the updated or + pre-update stat baseline? 
+``` + +### 4c: Flag Scenario Issues + +For each problem found during the walkthrough, categorize severity: + +- **BLOCKER**: undefined behavior, broken state transition, or contradictory + player messaging — the experience is broken or incoherent in this scenario +- **WARNING**: compounding spikes, feedback loops without caps, reward conflicts — + the experience works but produces unintended outcomes +- **INFO**: minor ordering ambiguity or messaging overlap — worth noting but + unlikely to cause player-visible problems + +Add all findings to the output report under **"Cross-System Scenario Issues"**. +Each finding must cite: the scenario name, the specific systems involved, the +step where the issue occurs, and the nature of the failure mode. + +--- + +## Phase 5: Output the Review Report + +``` +## Cross-GDD Review Report +Date: [date] +GDDs Reviewed: [N] +Systems Covered: [list] + +--- + +### Consistency Issues + +#### Blocking (must resolve before architecture begins) +🔴 [Issue title] +[What GDDs are involved, what the contradiction is, what needs to change] + +#### Warnings (should resolve, but won't block) +⚠️ [Issue title] +[What GDDs are involved, what the concern is] + +--- + +### Game Design Issues + +#### Blocking +🔴 [Issue title] +[What the problem is, which GDDs are involved, design recommendation] + +#### Warnings +⚠️ [Issue title] +[What the concern is, which GDDs are affected, recommendation] + +--- + +### Cross-System Scenario Issues + +Scenarios walked: [N] +[List scenario names] + +#### Blockers +🔴 [Scenario name] — [Systems involved] +[Step where failure occurs, nature of the failure mode, what must be resolved] + +#### Warnings +⚠️ [Scenario name] — [Systems involved] +[What the unintended outcome is, recommendation] + +#### Info +ℹ️ [Scenario name] — [Systems involved] +[Minor ordering ambiguity or note] + +--- + +### GDDs Flagged for Revision + +| GDD | Reason | Type | Priority | +|-----|--------|------|----------| +| [system-a].md | Rule 
contradiction with [system-b].md | Consistency | Blocking | +| [system-c].md | Stale reference to nonexistent mechanic | Consistency | Blocking | +| [system-d].md | No pillar alignment | Design Theory | Warning | + +--- + +### Verdict: [PASS / CONCERNS / FAIL] + +PASS: No blocking issues. Warnings present but don't prevent architecture. +CONCERNS: Warnings present that should be resolved but are not blocking. +FAIL: One or more blocking issues must be resolved before architecture begins. + +### If FAIL — required actions before re-running: +[Specific list of what must change in which GDD] +``` + +--- + +## Phase 6: Write Report and Flag GDDs + +Use `AskUserQuestion` for write permission: +- Prompt: "May I write this review to `design/gdd/gdd-cross-review-[date].md`?" +- Options: `[A] Yes — write the report` / `[B] No — skip` + +If any GDDs are flagged for revision, use a second `AskUserQuestion`: +- Prompt: "Should I update the systems index to mark these GDDs as needing revision? ([list of flagged GDDs])" +- Options: `[A] Yes — update systems index` / `[B] No — leave as-is` +- If yes: update each flagged GDD's Status field in systems-index.md to "Needs Revision". + (Do NOT append parentheticals to the status value — other skills match "Needs Revision" + as an exact string and parentheticals break that match.) + +### Session State Update + +After writing the report (and updating systems index if approved), silently +append to `production/session-state/active.md`: + + ## Session Extract — /review-all-gdds [date] + - Verdict: [PASS / CONCERNS / FAIL] + - GDDs reviewed: [N] + - Flagged for revision: [comma-separated list, or "None"] + - Blocking issues: [N — brief one-line descriptions, or "None"] + - Recommended next: [the Phase 7 handoff action, condensed to one line] + - Report: design/gdd/gdd-cross-review-[date].md + +If `active.md` does not exist, create it with this block as the initial content. +Confirm in conversation: "Session state updated." 
+ +--- + +## Phase 7: Handoff + +After all file writes are complete, use `AskUserQuestion` for a closing widget. + +Before building options, check project state: +- Are there any Warning-level items that are simple edits (flagged with "30-second edit", "brief addition", or similar)? → offer inline quick-fix option +- Are any GDDs in the "Flagged for Revision" table? → offer /design-review option for each +- Read systems-index.md for the next system with Status: Not Started → offer /design-system option +- Is the verdict PASS or CONCERNS? → offer /gate-check or /create-architecture + +Build the option list dynamically — only include options that apply: + +**Option pool:** +- `[_] Apply quick fix: [W-XX description] in [gdd-name].md — [effort estimate]` (one option per simple-edit warning; only for Warning-level, not Blocking) +- `[_] Run /design-review [flagged-gdd-path] — address flagged warnings` (one per flagged GDD, if any) +- `[_] Run /design-system [next-system] — next in design order` (always include, name the actual system) +- `[_] Run /create-architecture — begin architecture (verdict is PASS/CONCERNS)` (include if verdict is not FAIL) +- `[_] Run /gate-check — validate Systems Design phase gate` (include if verdict is PASS) +- `[_] Stop here` + +Assign letters A, B, C… only to included options. Mark the most pipeline-advancing option as `(recommended)`. + +Never end the skill with plain text. Always close with this widget. + +--- + +## Error Recovery Protocol + +If any spawned agent returns BLOCKED, errors, or fails to complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" before continuing +2. **Assess dependencies**: If the blocked agent's output is required by a later phase, do not proceed past that phase without user input +3. 
**Offer options** via AskUserQuestion with three choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope (fewer GDDs, single-system focus) + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed so work is not lost + +--- + +## Collaborative Protocol + +1. **Read silently** — load all GDDs before presenting anything +2. **Show everything** — present the full consistency and design theory analysis + before asking for any action +3. **Distinguish blocking from advisory** — not every issue needs to block + architecture; be clear about which do +4. **Don't make design decisions** — flag contradictions and options, but never + unilaterally decide which GDD is "right" +5. **Ask before writing** — confirm before writing the report or updating the + systems index +6. **Be specific** — every issue must cite the exact GDD, section, and text + involved; no vague warnings diff --git a/.omc/skills/scope-check/SKILL.md b/.omc/skills/scope-check/SKILL.md new file mode 100644 index 0000000..ccffc91 --- /dev/null +++ b/.omc/skills/scope-check/SKILL.md @@ -0,0 +1,128 @@ +--- +name: scope-check +description: "Analyze a feature or sprint for scope creep by comparing current scope against the original plan. Flags additions, quantifies bloat, and recommends cuts. Use when user says 'any scope creep', 'scope review', 'are we staying in scope'." +argument-hint: "[feature-name or sprint-N]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash +model: haiku +--- + +# Scope Check + +This skill is read-only — it reports findings but writes no files. + +Compares original planned scope against current state to detect, quantify, and triage +scope creep. + +**Argument:** `$ARGUMENTS[0]` — feature name, sprint number, or milestone name. 
+ +--- + +## Phase 1: Find the Original Plan + +Locate the baseline scope document for the given argument: + +- **Feature name** → read `design/gdd/[feature].md` or matching file in `design/` +- **Sprint number** (e.g., `sprint-3`) → read `production/sprints/sprint-03.md` or similar +- **Milestone** → read `production/milestones/[name].md` + +If the document is not found, report the missing file and stop. Do not proceed without +a baseline to compare against. + +--- + +## Phase 2: Read the Current State + +Check what has actually been implemented or is in progress: + +- Scan the codebase for files related to the feature/sprint +- Read git log for commits related to this work (`git log --oneline --since=[start-date]`) +- Check for TODO/FIXME comments that indicate unfinished scope additions +- Check active sprint plan if the feature is mid-sprint + +--- + +## Phase 3: Compare Original vs Current Scope + +Produce the comparison report: + +```markdown +## Scope Check: [Feature/Sprint Name] +Generated: [Date] + +### Original Scope +[List of items from the original plan] + +### Current Scope +[List of items currently implemented or in progress] + +### Scope Additions (not in original plan) +| Addition | Source | When | Justified? | Effort | +|----------|--------|------|------------|--------| +| [item] | [commit/person] | [date] | [Yes/No/Unclear] | [S/M/L] | + +### Scope Removals (in original but dropped) +| Removed Item | Reason | Impact | +|-------------|--------|--------| +| [item] | [why removed] | [what's affected] | + +### Bloat Score +- Original items: [N] +- Current items: [N] +- Items added: [N] (+[X]%) +- Items removed: [N] +- Net scope change: [+/-N] ([X]%) + +### Risk Assessment +- **Schedule Risk**: [Low/Medium/High] — [explanation] +- **Quality Risk**: [Low/Medium/High] — [explanation] +- **Integration Risk**: [Low/Medium/High] — [explanation] + +### Recommendations +1. **Cut**: [Items that should be removed to stay on schedule] +2. 
**Defer**: [Items that can move to a future sprint/version]
+3. **Keep**: [Additions that are genuinely necessary]
+4. **Flag**: [Items that need a decision from producer/creative-director]
+```
+
+---
+
+## Phase 4: Verdict
+
+Assign a canonical verdict based on net scope change:
+
+| Net Change | Verdict | Meaning |
+|-----------|---------|---------|
+| ≤10% | **PASS** | On Track — within acceptable variance |
+| >10–25% | **CONCERNS** | Minor Creep — manageable with targeted cuts |
+| >25–50% | **FAIL** | Significant Creep — must cut or formally extend timeline |
+| >50% | **FAIL** | Out of Control — stop, re-plan, escalate to producer |
+
+Output the verdict prominently:
+
+```
+**Scope Verdict: [PASS / CONCERNS / FAIL]**
+Net change: [+X%] — [On Track / Minor Creep / Significant Creep / Out of Control]
+```
+
+---
+
+## Phase 5: Next Steps
+
+After presenting the report, offer concrete follow-up:
+
+- **PASS** → no action required. Suggest re-running before next milestone.
+- **CONCERNS** → offer to identify the 2–3 additions with best cut ratio. Reference `/sprint-plan update` to formally re-scope.
+- **FAIL** → recommend escalating to producer. Reference `/sprint-plan update` for re-planning or `/estimate` to re-baseline timeline.
+
+Always end with:
+> "Run `/scope-check [name]` again after cuts are made to verify the verdict improves."
+
+---
+
+### Rules
+
+- Scope creep is additions without corresponding cuts or timeline extensions
+- Not all additions are bad — some are discovered requirements.
But they must be acknowledged and accounted for +- When recommending cuts, prioritize preserving the core player experience over nice-to-haves +- Always quantify scope changes — "it feels bigger" is not actionable, "+35% items" is diff --git a/.omc/skills/security-audit/SKILL.md b/.omc/skills/security-audit/SKILL.md new file mode 100644 index 0000000..9e363fe --- /dev/null +++ b/.omc/skills/security-audit/SKILL.md @@ -0,0 +1,244 @@ +--- +name: security-audit +description: "Audit the game for security vulnerabilities: save tampering, cheat vectors, network exploits, data exposure, and input validation gaps. Produces a prioritised security report with remediation guidance. Run before any public release or multiplayer launch." +argument-hint: "[full | network | save | input | quick]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write, Task +agent: security-engineer +--- + +# Security Audit + +Security is not optional for any shipped game. Even single-player games have +save tampering vectors. Multiplayer games have cheat surfaces, data exposure +risks, and denial-of-service potential. This skill systematically audits the +codebase for the most common game security failures and produces a prioritised +remediation plan. 
+ +**Run this skill:** +- Before any public release (required for the Polish → Release gate) +- Before enabling any online/multiplayer feature +- After implementing any system that reads from disk or network +- When a security-related bug is reported + +**Output:** `production/security/security-audit-[date].md` + +--- + +## Phase 1: Parse Arguments and Scope + +**Modes:** +- `full` — all categories (recommended before release) +- `network` — network/multiplayer only +- `save` — save file and serialization only +- `input` — input validation and injection only +- `quick` — high-severity checks only (fastest, for iterative use) +- No argument — run `full` + +Read `.claude/docs/technical-preferences.md` to determine: +- Engine and language (affects which patterns to search for) +- Target platforms (affects which attack surfaces apply) +- Whether multiplayer/networking is in scope + +--- + +## Phase 2: Spawn Security Engineer + +Spawn `security-engineer` via Task. Pass: +- The audit scope/mode +- Engine and language from technical preferences +- A manifest of all source directories: `src/`, `assets/data/`, any config files + +The security-engineer runs the audit across 6 categories (see Phase 3). Collect their full findings before proceeding. + +--- + +## Phase 3: Audit Categories + +The security-engineer evaluates each of the following. Skip categories not applicable to the project scope. + +### Category 1: Save File and Serialization Security +- Are save files validated before loading? (no blind deserialization) +- Are save file paths constructed from user input? (path traversal risk) +- Are save files checksummed or signed? (tamper detection) +- Does the game trust numeric values from save files without bounds checking? +- Are there any eval() or dynamic code execution calls near save loading? + +Grep patterns: `File.open`, `load`, `deserialize`, `JSON.parse`, `from_json`, `read_file` — check each for validation. 
+ +### Category 2: Network and Multiplayer Security (skip if single-player only) +- Is game state authoritative on the server, or does the client dictate outcomes? +- Are incoming network packets validated for size, type, and value range? +- Are player positions and state changes validated server-side? +- Is there rate limiting on any network calls? +- Are authentication tokens handled correctly (never sent in plaintext)? +- Does the game expose any debug endpoints in release builds? + +Grep for: `recv`, `receive`, `PacketPeer`, `socket`, `NetworkedMultiplayerPeer`, `rpc`, `rpc_id` — check each call site for validation. + +### Category 3: Input Validation +- Are any player-supplied strings used in file paths? (path traversal) +- Are any player-supplied strings logged without sanitization? (log injection) +- Are numeric inputs (e.g., item quantities, character stats) bounds-checked before use? +- Are achievement/stat values checked before being written to any backend? + +Grep for: `get_input`, `Input.get_`, `input_map`, user-facing text fields — check validation. + +### Category 4: Data Exposure +- Are any API keys, credentials, or secrets hardcoded in `src/` or `assets/`? +- Are debug symbols or verbose error messages included in release builds? +- Does the game log sensitive player data to disk or console? +- Are any internal file paths or system information exposed to players? + +Grep for: `api_key`, `secret`, `password`, `token`, `private_key`, `DEBUG`, `print(` in release-facing code. + +### Category 5: Cheat and Anti-Tamper Vectors +- Are gameplay-critical values stored only in memory, not in easily-editable files? +- Are any critical game progression flags (e.g., "has paid for DLC") validated server-side? +- Is there any protection against memory editing tools (Cheat Engine, etc.) for multiplayer? +- Are leaderboard/score submissions validated before acceptance? + +Note: Client-side anti-cheat is largely unenforceable. 
Focus on server-side validation for anything competitive or monetised. + +### Category 6: Dependency and Supply Chain +- Are any third-party plugins or libraries used? List them. +- Do any plugins have known CVEs in the version being used? +- Are plugin sources verified (official marketplace, reviewed repository)? + +Glob for: `addons/`, `plugins/`, `third_party/`, `vendor/` — list all external dependencies. + +--- + +## Phase 4: Classify Findings + +For each finding, assign: + +**Severity:** +| Level | Definition | +|-------|-----------| +| **CRITICAL** | Remote code execution, data breach, or trivially-exploitable cheat that breaks multiplayer integrity | +| **HIGH** | Save tampering that bypasses progression, credential exposure, or server-side authority bypass | +| **MEDIUM** | Client-side cheat enablement, information disclosure, or input validation gap with limited impact | +| **LOW** | Defence-in-depth improvement — hardening that reduces attack surface but no direct exploit exists | + +**Status:** Open / Accepted Risk / Out of Scope + +--- + +## Phase 5: Generate Report + +```markdown +# Security Audit Report + +**Date**: [date] +**Scope**: [full | network | save | input | quick] +**Engine**: [engine + version] +**Audited by**: security-engineer via /security-audit +**Files scanned**: [N source files, N config files] + +--- + +## Executive Summary + +| Severity | Count | Must Fix Before Release | +|----------|-------|------------------------| +| CRITICAL | [N] | Yes — all | +| HIGH | [N] | Yes — all | +| MEDIUM | [N] | Recommended | +| LOW | [N] | Optional | + +**Release recommendation**: [CLEAR TO SHIP / FIX CRITICALS FIRST / DO NOT SHIP] + +--- + +## CRITICAL Findings + +### SEC-001: [Title] +**Category**: [Save / Network / Input / Data / Cheat / Dependency] +**File**: `[path]` line [N] +**Description**: [What the vulnerability is] +**Attack scenario**: [How a malicious user would exploit it] +**Remediation**: [Specific code change or pattern to apply] 
+**Effort**: [Low / Medium / High] + +[repeat per finding] + +--- + +## HIGH Findings + +[same format] + +--- + +## MEDIUM Findings + +[same format] + +--- + +## LOW Findings + +[same format] + +--- + +## Accepted Risk + +[Any findings explicitly accepted by the team with rationale] + +--- + +## Dependency Inventory + +| Plugin / Library | Version | Source | Known CVEs | +|-----------------|---------|--------|------------| +| [name] | [version] | [source] | [none / CVE-XXXX-NNNN] | + +--- + +## Remediation Priority Order + +1. [SEC-NNN] — [1-line description] — Est. effort: [Low/Medium/High] +2. ... + +--- + +## Re-Audit Trigger + +Run `/security-audit` again after remediating any CRITICAL or HIGH findings. +The Polish → Release gate requires this report with no open CRITICAL or HIGH items. +``` + +--- + +## Phase 6: Write Report + +Present the report summary (executive summary + CRITICAL/HIGH findings only) in conversation. + +Ask: "May I write the full security audit report to `production/security/security-audit-[date].md`?" + +Write only after approval. + +--- + +## Phase 7: Gate Integration + +This report is a required artifact for the **Polish → Release gate**. + +After remediating findings, re-run: `/security-audit quick` to confirm CRITICAL/HIGH items are resolved before running `/gate-check release`. + +If CRITICAL findings exist: +> "⛔ CRITICAL security findings must be resolved before any public release. Do not proceed to `/launch-checklist` until these are addressed." + +If no CRITICAL/HIGH findings: +> "✅ No blocking security findings. Report written to `production/security/`. Include this path when running `/gate-check release`." 
+ +--- + +## Collaborative Protocol + +- **Never assume a pattern is safe** — flag it and let the user decide +- **Accepted risk is a valid outcome** — some LOW findings are acceptable trade-offs for a solo team; document the decision +- **Multiplayer games have a higher bar** — any HIGH finding in a multiplayer context should be treated as CRITICAL +- **This is not a penetration test** — this audit covers common patterns; a real pentest by a human security professional is recommended before any competitive or monetised multiplayer launch diff --git a/.omc/skills/setup-engine/SKILL.md b/.omc/skills/setup-engine/SKILL.md new file mode 100644 index 0000000..d39b81e --- /dev/null +++ b/.omc/skills/setup-engine/SKILL.md @@ -0,0 +1,715 @@ +--- +name: setup-engine +description: "Configure the project's game engine and version. Pins the engine in CLAUDE.md, detects knowledge gaps, and populates engine reference docs via WebSearch when the version is beyond the LLM's training data." +argument-hint: "[engine] | [engine version] | refresh | upgrade [old-version] [new-version] | no args for guided selection" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, WebSearch, WebFetch, Task, AskUserQuestion +--- + +When this skill is invoked: + +## 1. Parse Arguments + +Four modes: + +- **Full spec**: `/setup-engine godot 4.6` — engine and version provided +- **Engine only**: `/setup-engine unity` — engine provided, version will be looked up +- **No args**: `/setup-engine` — fully guided mode (engine recommendation + version) +- **Refresh**: `/setup-engine refresh` — update reference docs (see Section 10) +- **Upgrade**: `/setup-engine upgrade [old-version] [new-version]` — migrate to a new engine version (see Section 11) + +--- + +## 2. 
Guided Mode (No Arguments)
+
+If no engine is specified, run an interactive engine selection process:
+
+### Check for existing game concept
+- Read `design/gdd/game-concept.md` if it exists — extract genre, scope, platform
+ targets, art style, team size, and any engine recommendation from `/brainstorm`
+- If no concept exists, inform the user:
+ > "No game concept found. Consider running `/brainstorm` first to discover what
+ > you want to build — it will also recommend an engine. Or tell me about your
+ > game and I can help you pick."
+
+### If the user wants to pick without a concept, ask in this order:
+
+**Question 1 — Prior experience** (ask this first, always, via `AskUserQuestion`):
+- Prompt: "Have you worked in any of these engines before?"
+- Options: `Godot` / `Unity` / `Unreal Engine 5` / `Multiple — I'll explain` / `None of them`
+- If they pick a specific engine → recommend that engine. Prior experience outweighs all other factors. Confirm with them and skip the matrix.
+- If "None" or "Multiple" → continue to the questions below.
+
+**Questions 2-7 — Decision matrix inputs** (only if no prior engine experience):
+
+**Question 2 — Target platform** (ask this second, always, via `AskUserQuestion` — platform eliminates or heavily weights engines before any other factor):
+- Prompt: "What platforms are you targeting for this game?"
+- Options: `PC (Steam / Epic)` / `Mobile (iOS / Android)` / `Console` / `Web / Browser` / `Multiple platforms`
+- Platform rules that feed directly into the recommendation:
+ - Mobile → Unity strongly preferred; Unreal is a poor fit; Godot is viable for simple mobile
+ - Console → Unity or Unreal; Godot console support requires third-party publishers or significant extra work
+ - Web → Godot exports cleanly to web; Unity WebGL is functional; Unreal has poor web support
+ - PC only → all engines viable; other factors decide
+ - Multiple → Unity is the most portable across PC/mobile/console
+
+3. **What kind of game?** (2D, 3D, or both?)
+4. **Primary input method?** (keyboard/mouse, gamepad, touch, or mixed?)
+5. **Team size and experience?** (solo beginner, solo experienced, small team?)
+6. **Any strong language preferences?** (GDScript, C#, C++, visual scripting?)
+7. **Budget for engine licensing?** (free only, or commercial licenses OK?)
+
+### Produce a recommendation
+
+Do NOT use a simple scoring matrix that eliminates engines. Instead, reason through the user's profile against the honest tradeoffs below, then present 1-2 recommendations with full context. Always end with the user choosing — never force a verdict.
+
+**Engine honest tradeoffs:**
+
+**Godot 4**
+- Genuine strengths: 2D (best in class), stylized/indie 3D, rapid iteration, free forever (MIT), open source, gentlest learning curve, best for solo devs who want full control
+- Real limitations: 3D ecosystem is thin compared to Unity/Unreal (fewer tutorials, assets, community answers for 3D-specific problems); large open-world 3D is very hard and largely untested in Godot; console export requires third-party publishers or significant extra work; smaller professional job market
+- Licensing reality: Truly free with no revenue thresholds ever. MIT license means you own everything.
+- Best fit: 2D games of any scope; stylized/atmospheric 3D; contained 3D worlds (not open-world); first game projects where learning curve matters; projects where budget is a hard constraint at any scale + +**Unity** +- Genuine strengths: Industry standard for mid-scope 3D and mobile; massive asset store and tutorial ecosystem; C# is a professional language; best console certification support for indie; strong community for almost every genre +- Real limitations: Licensing controversy in 2023 damaged trust (runtime fee was proposed then walked back — the risk of policy changes remains real); C# has a steeper initial curve than GDScript; heavier editor than Godot for simple projects +- Licensing reality: Free under $200K revenue AND 200K installs (Unity Personal/Plus). Only becomes costly if the game is genuinely successful — most indie games never hit this threshold. The 2023 controversy is worth knowing about but the actual current terms are reasonable for most indie developers. +- Best fit: Mobile games; mid-scope 3D; games targeting console; developers with C# background; projects needing large asset store; teams of 2-5 + +**Unreal Engine 5** +- Genuine strengths: Best-in-class 3D visuals (Lumen, Nanite, Chaos physics); industry standard for AAA and photorealistic 3D; large open-world support is mature and production-tested; Blueprint visual scripting lowers C++ barrier; strong for games targeting high-end PC or console +- Real limitations: Steepest learning curve; heaviest editor (slow compile times, large project sizes); overkill for stylized/2D/small-scope games; C++ is genuinely hard; not suitable for mobile or web; 5% royalty past $1M gross revenue +- Licensing reality: 5% royalty only applies AFTER $1M gross revenue per title. For a first game or any game that doesn't reach $1M, it costs nothing. This threshold is high enough that most indie developers will never pay it. 
+- Best fit: AAA-quality 3D; large open-world games; photorealistic visuals; developers with C++ experience or willing to use Blueprint; games targeting high-end PC/console where visual fidelity is a core selling point + +**Genre-specific guidance** (factor this into the recommendation): +- 2D any style → Godot strongly preferred +- 3D stylized / atmospheric / contained world → Godot viable, Unity solid alternative +- 3D open world (large, seamless) → Unity or Unreal; Godot is not production-proven for this +- 3D photorealistic / AAA-quality → Unreal +- Mobile-first → Unity strongly preferred +- Console-first → Unity or Unreal; Godot console support requires extra work +- Horror / narrative / walking sim → any engine; match to art style and team experience +- Action RPG / Soulslike → Unity or Unreal for 3D; community support and assets matter here +- Platformer 2D → Godot +- Strategy / top-down / RTS → Godot or Unity depending on 2D vs 3D + +**Recommendation format:** +1. Show a comparison table with the user's specific factors as rows +2. Give a primary recommendation with honest reasoning +3. Name the best alternative and when to choose it instead +4. Explicitly state: "This is a starting point, not a verdict — you can always migrate engines, and many developers switch between projects." +5. Use `AskUserQuestion` to confirm: "Does this recommendation feel right, or would you like to explore a different engine?" + - Options: `[Primary engine] (Recommended)` / `[Alternative engine]` / `[Third engine]` / `Explore further` / `Type something` + +**If the user picks "Explore further":** +Use `AskUserQuestion` with concept-specific deep-dive topics. Always generate these options from the user's actual concept — do not use generic options. 
Always include at minimum: +- The primary engine's specific limitations for this concept (e.g., "How far can Godot 3D actually go for [genre]?") +- The alternative engine's specific tradeoffs for this concept +- Language choice impact on this concept's technical challenges +- Any concept-specific technical concern (e.g., adaptive audio, open-world streaming, multiplayer netcode) + +The user can select multiple topics. Answer each selected topic in depth before returning to the engine confirmation question. + +--- + +## 3. Look Up Current Version + +Once the engine is chosen: + +- If version was provided, use it +- If no version provided, use WebSearch to find the latest stable release: + - Search: `"[engine] latest stable version [current year]"` + - Confirm with the user: "The latest stable [engine] is [version]. Use this?" + +--- + +## 4. Update CLAUDE.md Technology Stack + +### Language Selection (Godot only) + +If Godot was chosen, ask the user which language to use **before** showing the proposed Technology Stack: + +> "Godot supports two primary languages: +> +> **A) GDScript** — Python-like, Godot-native, fastest iteration. Best for beginners, solo devs, and teams coming from Python or Lua. +> **B) C#** — .NET 8+, familiar to Unity developers, stronger IDE tooling (Rider / Visual Studio), slight performance advantage on heavy logic. +> **C) Both** — GDScript for gameplay/UI scripting, C# for performance-critical systems. Advanced setup — requires .NET SDK alongside Godot. +> +> Which will this project primarily use?" + +Record the choice. It determines the CLAUDE.md template, naming conventions, specialist routing, and which agent is spawned for code files throughout the project. + +--- + +Read `CLAUDE.md` and show the user the proposed Technology Stack changes. +Ask: "May I write these engine settings to `CLAUDE.md`?" + +Wait for confirmation before making any edits. 
+ +Update the Technology Stack section, replacing the `[CHOOSE]` placeholders with the actual values: + +**For Godot** — use the template matching the language chosen above. See **Appendix A** at the bottom of this skill for all three variants (GDScript, C#, Both). + +**For Unity:** +```markdown +- **Engine**: Unity [version] +- **Language**: C# +- **Build System**: Unity Build Pipeline +- **Asset Pipeline**: Unity Asset Import Pipeline + Addressables +``` + +**For Unreal:** +```markdown +- **Engine**: Unreal Engine [version] +- **Language**: C++ (primary), Blueprint (gameplay prototyping) +- **Build System**: Unreal Build Tool (UBT) +- **Asset Pipeline**: Unreal Content Pipeline +``` + +--- + +## 5. Populate Technical Preferences + +After updating CLAUDE.md, create or update `.claude/docs/technical-preferences.md` with +engine-appropriate defaults. Read the existing template first, then fill in: + +### Engine & Language Section +- Fill from the engine choice made in step 4 + +### Naming Conventions (engine defaults) + +**For Godot** — see **Appendix A** for GDScript, C#, and Both variants. + +**For Unity (C#):** +- Classes: PascalCase (e.g., `PlayerController`) +- Public fields/properties: PascalCase (e.g., `MoveSpeed`) +- Private fields: _camelCase (e.g., `_moveSpeed`) +- Methods: PascalCase (e.g., `TakeDamage()`) +- Files: PascalCase matching class (e.g., `PlayerController.cs`) +- Constants: PascalCase or UPPER_SNAKE_CASE + +**For Unreal (C++):** +- Classes: Prefixed PascalCase (`A` for Actor, `U` for UObject, `F` for struct) +- Variables: PascalCase (e.g., `MoveSpeed`) +- Functions: PascalCase (e.g., `TakeDamage()`) +- Booleans: `b` prefix (e.g., `bIsAlive`) +- Files: Match class without prefix (e.g., `PlayerController.h`) + +### Input & Platform Section + +Populate `## Input & Platform` using the answers gathered in Section 2 (or extracted +from the game concept). 
Derive the values using this mapping: + +| Platform target | Gamepad Support | Touch Support | +|-----------------|-----------------|---------------| +| PC only | Partial (recommended) | None | +| Console | Full | None | +| Mobile | None | Full | +| PC + Console | Full | None | +| PC + Mobile | Partial | Full | +| Web | Partial | Partial | + +For **Primary Input**, use the dominant input for the game genre: +- Action/RPG/platformer targeting console → Gamepad +- Strategy/point-and-click/RTS → Keyboard/Mouse +- Mobile game → Touch +- Cross-platform → ask the user + +Present the derived values and ask the user to confirm or adjust before writing. + +Example filled section: +```markdown +## Input & Platform +- **Target Platforms**: PC, Console +- **Input Methods**: Keyboard/Mouse, Gamepad +- **Primary Input**: Gamepad +- **Gamepad Support**: Full +- **Touch Support**: None +- **Platform Notes**: All UI must support d-pad navigation. No hover-only interactions. +``` + +### Remaining Sections +- **Performance Budgets**: Use `AskUserQuestion`: + - Prompt: "Should I set default performance budgets now, or leave them for later?" + - Options: `[A] Set defaults now (60fps, 16.6ms frame budget, engine-appropriate draw call limit)` / `[B] Leave as [TO BE CONFIGURED] — I'll set these when I know my target hardware` + - If [A]: populate with the suggested defaults. If [B]: leave as placeholder. +- **Testing**: Suggest engine-appropriate framework (GUT for Godot, NUnit for Unity, etc.) — ask before adding. +- **Forbidden Patterns**: Leave as placeholder — do NOT pre-populate. +- **Allowed Libraries**: Leave as placeholder — do NOT pre-populate dependencies the project does not currently need. Only add a library here when it is actively being integrated, not speculatively. + +> **Guardrail**: Never add speculative dependencies to Allowed Libraries. For example, do NOT add GodotSteam unless Steam integration is actively beginning in this session. 
Post-launch integrations should be added to Allowed Libraries when that work begins, not during engine setup. + +### Engine Specialists Routing + +Also populate the `## Engine Specialists` section in `technical-preferences.md` with the correct routing for the chosen engine: + +**For Godot** — see **Appendix A** for the routing table matching the language chosen. + +**For Unity:** +```markdown +## Engine Specialists +- **Primary**: unity-specialist +- **Language/Code Specialist**: unity-specialist (C# review — primary covers it) +- **Shader Specialist**: unity-shader-specialist (Shader Graph, HLSL, URP/HDRP materials) +- **UI Specialist**: unity-ui-specialist (UI Toolkit UXML/USS, UGUI Canvas, runtime UI) +- **Additional Specialists**: unity-dots-specialist (ECS, Jobs system, Burst compiler), unity-addressables-specialist (asset loading, memory management, content catalogs) +- **Routing Notes**: Invoke primary for architecture and general C# code review. Invoke DOTS specialist for any ECS/Jobs/Burst code. Invoke shader specialist for rendering and visual effects. Invoke UI specialist for all interface implementation. Invoke Addressables specialist for asset management systems. 
+ +### File Extension Routing + +| File Extension / Type | Specialist to Spawn | +|-----------------------|---------------------| +| Game code (.cs files) | unity-specialist | +| Shader / material files (.shader, .shadergraph, .mat) | unity-shader-specialist | +| UI / screen files (.uxml, .uss, Canvas prefabs) | unity-ui-specialist | +| Scene / prefab / level files (.unity, .prefab) | unity-specialist | +| Native extension / plugin files (.dll, native plugins) | unity-specialist | +| General architecture review | unity-specialist | +``` + +**For Unreal:** +```markdown +## Engine Specialists +- **Primary**: unreal-specialist +- **Language/Code Specialist**: ue-blueprint-specialist (Blueprint graphs) or unreal-specialist (C++) +- **Shader Specialist**: unreal-specialist (no dedicated shader specialist — primary covers materials) +- **UI Specialist**: ue-umg-specialist (UMG widgets, CommonUI, input routing, widget styling) +- **Additional Specialists**: ue-gas-specialist (Gameplay Ability System, attributes, gameplay effects), ue-replication-specialist (property replication, RPCs, client prediction, netcode) +- **Routing Notes**: Invoke primary for C++ architecture and broad engine decisions. Invoke Blueprint specialist for Blueprint graph architecture and BP/C++ boundary design. Invoke GAS specialist for all ability and attribute code. Invoke replication specialist for any multiplayer or networked systems. Invoke UMG specialist for all UI implementation. 
+ +### File Extension Routing + +| File Extension / Type | Specialist to Spawn | +|-----------------------|---------------------| +| Game code (.cpp, .h files) | unreal-specialist | +| Shader / material files (.usf, .ush, Material assets) | unreal-specialist | +| UI / screen files (.umg, UMG Widget Blueprints) | ue-umg-specialist | +| Scene / prefab / level files (.umap, .uasset) | unreal-specialist | +| Native extension / plugin files (Plugin .uplugin, modules) | unreal-specialist | +| Blueprint graphs (.uasset BP classes) | ue-blueprint-specialist | +| General architecture review | unreal-specialist | +``` + +### Collaborative Step +Present the filled-in preferences to the user. For Godot, include the chosen language and note where the full naming conventions and routing tables live: +> "Here are the default technical preferences for [engine] ([language if Godot]). The naming conventions and specialist routing are in Appendix A of this skill — I'll apply the [GDScript/C#/Both] variant. Want to customize any of these, or shall I save the defaults?" + +For all other engines, present the defaults directly without referencing the appendix. + +Wait for approval before writing the file. + +--- + +## 6. Determine Knowledge Gap + +Check whether the engine version is likely beyond the LLM's training data. + +**Known approximate coverage** (update this as models change): +- LLM knowledge cutoff: **May 2025** +- Godot: training data likely covers up to ~4.3 +- Unity: training data likely covers up to ~2023.x / early 6000.x +- Unreal: training data likely covers up to ~5.3 / early 5.4 + +Compare the user's chosen version against these baselines: + +- **Within training data** → `LOW RISK` — reference docs optional but recommended +- **Near the edge** → `MEDIUM RISK` — reference docs recommended +- **Beyond training data** → `HIGH RISK` — reference docs required + +Inform the user which category they're in and why. + +--- + +## 7. 
Populate Engine Reference Docs + +### If WITHIN training data (LOW RISK): + +Create a minimal `docs/engine-reference//VERSION.md`: + +```markdown +# [Engine] — Version Reference + +| Field | Value | +|-------|-------| +| **Engine Version** | [version] | +| **Project Pinned** | [today's date] | +| **LLM Knowledge Cutoff** | May 2025 | +| **Risk Level** | LOW — version is within LLM training data | + +## Note + +This engine version is within the LLM's training data. Engine reference +docs are optional but can be added later if agents suggest incorrect APIs. + +Run `/setup-engine refresh` to populate full reference docs at any time. +``` + +Do NOT create breaking-changes.md, deprecated-apis.md, etc. — they would +add context cost with minimal value. + +### If BEYOND training data (MEDIUM or HIGH RISK): + +Create the full reference doc set by searching the web: + +1. **Search for the official migration/upgrade guide**: + - `"[engine] [old version] to [new version] migration guide"` + - `"[engine] [version] breaking changes"` + - `"[engine] [version] changelog"` + - `"[engine] [version] deprecated API"` + +2. **Fetch and extract** from official documentation: + - Breaking changes between each version from the training cutoff to current + - Deprecated APIs with replacements + - New features and best practices + +Ask: "May I create the engine reference docs under `docs/engine-reference//`?" + +Wait for confirmation before writing any files. + +3. **Create the full reference directory**: + ``` + docs/engine-reference// + ├── VERSION.md # Version pin + knowledge gap analysis + ├── breaking-changes.md # Version-by-version breaking changes + ├── deprecated-apis.md # "Don't use X → Use Y" tables + ├── current-best-practices.md # New practices since training cutoff + └── modules/ # Per-subsystem references (create as needed) + ``` + +4. **Populate each file** using real data from the web searches, following + the format established in existing reference docs. 
Every file must have + a "Last verified: [date]" header. + +5. **For module files**: Only create modules for subsystems where significant + changes occurred. Don't create empty or minimal module files. + +--- + +## 8. Update CLAUDE.md Import + +Ask: "May I update the `@` import in `CLAUDE.md` to point to the new engine reference?" + +Wait for confirmation, then update the `@` import under "Engine Version Reference" to point to the +correct engine: + +```markdown +## Engine Version Reference + +@docs/engine-reference//VERSION.md +``` + +If the previous import pointed to a different engine (e.g., switching from +Godot to Unity), update it. + +--- + +## 9. Update Agent Instructions + +Ask: "May I add a Version Awareness section to the engine specialist agent files?" before making any edits. + +For the chosen engine's specialist agents, verify they have a +"Version Awareness" section. If not, add one following the pattern in +the existing Godot specialist agents. + +The section should instruct the agent to: +1. Read `docs/engine-reference//VERSION.md` +2. Check deprecated APIs before suggesting code +3. Check breaking changes for relevant version transitions +4. Use WebSearch to verify uncertain APIs + +--- + +## 10. Refresh Subcommand + +If invoked as `/setup-engine refresh`: + +1. Read the existing `docs/engine-reference//VERSION.md` to get + the current engine and version +2. Use WebSearch to check for: + - New engine releases since last verification + - Updated migration guides + - Newly deprecated APIs +3. Update all reference docs with new findings +4. Update "Last verified" dates on all modified files +5. Report what changed + +--- + +## 11. Upgrade Subcommand + +If invoked as `/setup-engine upgrade [old-version] [new-version]`: + +### Step 1 — Read Current Version State + +Read `docs/engine-reference//VERSION.md` to confirm the current pinned +version, risk level, and any migration note URLs already recorded. 
If +`old-version` was not provided as an argument, use the pinned version from this +file. + +### Step 2 — Fetch Migration Guide + +Use WebSearch and WebFetch to locate the official migration guide between +`old-version` and `new-version`: + +- Search: `"[engine] [old-version] to [new-version] migration guide"` +- Search: `"[engine] [new-version] breaking changes changelog"` +- Fetch the migration guide URL from VERSION.md if one is already recorded, + or use the URL found via search. + +Extract: renamed APIs, removed APIs, changed defaults, behavior changes, and +any "must migrate" items. + +### Step 3 — Pre-Upgrade Audit + +Scan `src/` for code that uses APIs known to be deprecated or changed in the +target version: + +- Use Grep to search for deprecated API names extracted from the migration + guide (e.g., old function names, removed node types, changed property names) +- List each file that matches, with the specific API reference found + +Present the audit results as a table: + +``` +Pre-Upgrade Audit: [engine] [old-version] → [new-version] +========================================================== + +Files requiring changes: + File | Deprecated API Found | Effort + --------------------------------- | -------------------------- | ------ + src/gameplay/player_movement.gd | old_api_name | Low + src/ui/hud.gd | removed_node_type | Medium + +Breaking changes to watch for: + - [change description from migration guide] + - [change description from migration guide] + +Recommended migration order (dependency-sorted): + 1. [system/layer with fewest dependencies first] + 2. [next system] + ... +``` + +If no deprecated APIs are found in `src/`, report: "No deprecated API usage +found in src/ — upgrade may be low-risk." + +### Step 4 — Confirm Before Updating + +Ask the user before making any changes: + +> "Pre-upgrade audit complete. Found [N] files using deprecated APIs. +> Proceed with upgrading VERSION.md to [new-version]? 
+> (This will update the pinned version and add migration notes — it does NOT +> change any source files. Source migration is done manually or via stories.)" + +Wait for explicit confirmation before continuing. + +### Step 5 — Update VERSION.md + +After confirmation: + +1. Update `docs/engine-reference//VERSION.md`: + - `Engine Version` → `[new-version]` + - `Project Pinned` → today's date + - `Last Docs Verified` → today's date + - Re-evaluate and update the `Risk Level` and `Post-Cutoff Version Timeline` + table if the new version falls beyond the LLM knowledge cutoff + - Add a `## Migration Notes — [old-version] → [new-version]` section + containing: migration guide URL, key breaking changes, deprecated APIs + found in this project, and recommended migration order from the audit + +2. If `breaking-changes.md` or `deprecated-apis.md` exist in the engine + reference directory, append the new version's changes to those files. + +### Step 6 — Post-Upgrade Reminder + +After updating VERSION.md, output: + +``` +VERSION.md updated: [engine] [old-version] → [new-version] + +Next steps: +1. Migrate deprecated API usages in the [N] files listed above +2. Run /setup-engine refresh after upgrading the actual engine binary to + verify no new deprecations were missed +3. Run /architecture-review — the engine upgrade may invalidate ADRs that + reference specific APIs or engine capabilities +4. If any ADRs are invalidated, run /propagate-design-change to update + downstream stories +``` + +--- + +## 12. Output Summary + +After setup is complete, output: + +``` +Engine Setup Complete +===================== +Engine: [name] [version] +Language: [GDScript | C# | GDScript + C# | C# | C++ + Blueprint] +Knowledge Risk: [LOW/MEDIUM/HIGH] +Reference Docs: [created/skipped] +CLAUDE.md: [updated] +Tech Prefs: [created/updated] +Agent Config: [verified] + +Next Steps: +1. Review docs/engine-reference//VERSION.md +2. 
[If from /brainstorm] Run /map-systems to decompose your concept into individual systems +3. [If from /brainstorm] Run /design-system to author per-system GDDs (guided, section-by-section) +4. [If from /brainstorm] Run /prototype [core-mechanic] to test the core loop +5. [If fresh start] Run /brainstorm to discover your game concept +6. Create your first milestone: /sprint-plan new +``` + +--- + +Verdict: **COMPLETE** — engine configured and reference docs populated. + +## Guardrails + +- NEVER guess an engine version — always verify via WebSearch or user confirmation +- NEVER overwrite existing reference docs without asking — append or update +- If reference docs already exist for a different engine, ask before replacing +- Always show the user what you're about to change before making CLAUDE.md edits +- If WebSearch returns ambiguous results, show the user and let them decide +- When the user chose **GDScript**: copy the GDScript CLAUDE.md template from Appendix A1 exactly. NEVER add "C++ via GDExtension" to the Language field. GDScript projects may use GDExtension, but it is not a primary project language. The `godot-gdextension-specialist` in the routing table is available for when native extensions are needed — it does not make C++ a project language. + +--- + +## Appendix A — Godot Language Configuration + +All Godot-specific variants for language-dependent configuration. Referenced from Sections 4 and 5 — only relevant when Godot is the chosen engine. Use the subsection matching the language chosen in Section 4. + +--- + +### A1. CLAUDE.md Technology Stack Templates + +**GDScript:** +```markdown +- **Engine**: Godot [version] +- **Language**: GDScript +- **Build System**: SCons (engine), Godot Export Templates +- **Asset Pipeline**: Godot Import System + custom resource pipeline +``` + +> **Guardrail**: When using this GDScript template, write the Language field as exactly "`GDScript`" — no additions. 
Do NOT append "C++ via GDExtension" or any other language. The C# template below includes GDExtension because C# projects commonly wrap native code; GDScript projects do not. + +**C#:** +```markdown +- **Engine**: Godot [version] +- **Language**: C# (.NET 8+, primary), C++ via GDExtension (native plugins only) +- **Build System**: .NET SDK + Godot Export Templates +- **Asset Pipeline**: Godot Import System + custom resource pipeline +``` + +**Both — GDScript + C#:** +```markdown +- **Engine**: Godot [version] +- **Language**: GDScript (gameplay/UI scripting), C# (performance-critical systems), C++ via GDExtension (native only) +- **Build System**: .NET SDK + Godot Export Templates +- **Asset Pipeline**: Godot Import System + custom resource pipeline +``` + +--- + +### A2. Naming Conventions + +**GDScript:** +- Classes: PascalCase (e.g., `PlayerController`) +- Variables/functions: snake_case (e.g., `move_speed`) +- Signals: snake_case past tense (e.g., `health_changed`) +- Files: snake_case matching class (e.g., `player_controller.gd`) +- Scenes: PascalCase matching root node (e.g., `PlayerController.tscn`) +- Constants: UPPER_SNAKE_CASE (e.g., `MAX_HEALTH`) + +**C#:** +- Classes: PascalCase (`PlayerController`) — must also be `partial` +- Public properties/fields: PascalCase (`MoveSpeed`, `JumpVelocity`) +- Private fields: `_camelCase` (`_currentHealth`, `_isGrounded`) +- Methods: PascalCase (`TakeDamage()`, `GetCurrentHealth()`) +- Signal delegates: PascalCase + `EventHandler` suffix (`HealthChangedEventHandler`) +- Files: PascalCase matching class (`PlayerController.cs`) +- Scenes: PascalCase matching root node (`PlayerController.tscn`) +- Constants: PascalCase (`MaxHealth`, `DefaultMoveSpeed`) + +**Both — GDScript + C#:** +Use GDScript conventions for `.gd` files and C# conventions for `.cs` files. Mixed-language files do not exist — the boundary is per-file. 
When in doubt about which language a new system should use, ask the user and record the decision in `technical-preferences.md`. + +--- + +### A3. Engine Specialists Routing + +**GDScript:** +```markdown +## Engine Specialists +- **Primary**: godot-specialist +- **Language/Code Specialist**: godot-gdscript-specialist (all .gd files) +- **Shader Specialist**: godot-shader-specialist (.gdshader files, VisualShader resources) +- **UI Specialist**: godot-specialist (no dedicated UI specialist — primary covers all UI) +- **Additional Specialists**: godot-gdextension-specialist (GDExtension / native C++ bindings only) +- **Routing Notes**: Invoke primary for architecture decisions, ADR validation, and cross-cutting code review. Invoke GDScript specialist for code quality, signal architecture, static typing enforcement, and GDScript idioms. Invoke shader specialist for material design and shader code. Invoke GDExtension specialist only when native extensions are involved. + +### File Extension Routing + +| File Extension / Type | Specialist to Spawn | +|-----------------------|---------------------| +| Game code (.gd files) | godot-gdscript-specialist | +| Shader / material files (.gdshader, VisualShader) | godot-shader-specialist | +| UI / screen files (Control nodes, CanvasLayer) | godot-specialist | +| Scene / prefab / level files (.tscn, .tres) | godot-specialist | +| Native extension / plugin files (.gdextension, C++) | godot-gdextension-specialist | +| General architecture review | godot-specialist | +``` + +**C#:** +```markdown +## Engine Specialists +- **Primary**: godot-specialist +- **Language/Code Specialist**: godot-csharp-specialist (all .cs files) +- **Shader Specialist**: godot-shader-specialist (.gdshader files, VisualShader resources) +- **UI Specialist**: godot-specialist (no dedicated UI specialist — primary covers all UI) +- **Additional Specialists**: godot-gdextension-specialist (GDExtension / native C++ bindings only) +- **Routing Notes**: Invoke 
primary for architecture decisions, ADR validation, and cross-cutting code review. Invoke C# specialist for code quality, [Signal] delegate patterns, [Export] attributes, .csproj management, and C#-specific Godot idioms. Invoke shader specialist for material design and shader code. Invoke GDExtension specialist only when native C++ plugins are involved. + +### File Extension Routing + +| File Extension / Type | Specialist to Spawn | +|-----------------------|---------------------| +| Game code (.cs files) | godot-csharp-specialist | +| Shader / material files (.gdshader, VisualShader) | godot-shader-specialist | +| UI / screen files (Control nodes, CanvasLayer) | godot-specialist | +| Scene / prefab / level files (.tscn, .tres) | godot-specialist | +| Project config (.csproj, NuGet) | godot-csharp-specialist | +| Native extension / plugin files (.gdextension, C++) | godot-gdextension-specialist | +| General architecture review | godot-specialist | +``` + +**Both — GDScript + C#:** +```markdown +## Engine Specialists +- **Primary**: godot-specialist +- **GDScript Specialist**: godot-gdscript-specialist (.gd files — gameplay/UI scripts) +- **C# Specialist**: godot-csharp-specialist (.cs files — performance-critical systems) +- **Shader Specialist**: godot-shader-specialist (.gdshader files, VisualShader resources) +- **UI Specialist**: godot-specialist (no dedicated UI specialist — primary covers all UI) +- **Additional Specialists**: godot-gdextension-specialist (GDExtension / native C++ bindings only) +- **Routing Notes**: Invoke primary for cross-language architecture decisions and which systems belong in which language. Invoke GDScript specialist for .gd files. Invoke C# specialist for .cs files and .csproj management. Prefer signals over direct cross-language method calls at the boundary. 
+ +### File Extension Routing + +| File Extension / Type | Specialist to Spawn | +|-----------------------|---------------------| +| Game code (.gd files) | godot-gdscript-specialist | +| Game code (.cs files) | godot-csharp-specialist | +| Cross-language boundary decisions | godot-specialist | +| Shader / material files (.gdshader, VisualShader) | godot-shader-specialist | +| UI / screen files (Control nodes, CanvasLayer) | godot-specialist | +| Scene / prefab / level files (.tscn, .tres) | godot-specialist | +| Project config (.csproj, NuGet) | godot-csharp-specialist | +| Native extension / plugin files (.gdextension, C++) | godot-gdextension-specialist | +| General architecture review | godot-specialist | +``` diff --git a/.omc/skills/skill-improve/SKILL.md b/.omc/skills/skill-improve/SKILL.md new file mode 100644 index 0000000..340aee4 --- /dev/null +++ b/.omc/skills/skill-improve/SKILL.md @@ -0,0 +1,144 @@ +--- +name: skill-improve +description: "Improve a skill using a test-fix-retest loop. Runs static checks, proposes targeted fixes, rewrites the skill, re-tests, and keeps or reverts based on score change." +argument-hint: "[skill-name]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Bash +--- + +# Skill Improve + +Runs an improvement loop on a single skill: +test → fix → retest → keep or revert. + +--- + +## Phase 1: Parse Argument + +Read the skill name from the first argument. If missing, output usage and stop: + +``` +Usage: /skill-improve [skill-name] +Example: /skill-improve tech-debt +``` + +Verify `.claude/skills/[name]/SKILL.md` exists. If not, stop with: +"Skill '[name]' not found." 
+ +--- + +## Phase 2: Baseline Test + +Run `/skill-test static [name]` and record the baseline score: +- Count of FAILs +- Count of WARNs +- Which specific checks failed (Check 1–7) + +Display to the user: +``` +Static baseline: [N] failures, [M] warnings +Failing: Check 4 (no ask-before-write), Check 5 (no handoff) +``` + +If baseline is 0 FAILs and 0 WARNs, note it and proceed to Phase 2b. + +### Phase 2b: Category Baseline + +Look up the skill's `category:` field in `CCGS Skill Testing Framework/catalog.yaml`. + +If no `category:` field is found, display: +"Category: not yet assigned — skipping category checks." +and skip to Phase 3. + +If category is found, run `/skill-test category [name]` and record the category baseline: +- Count of FAILs +- Count of WARNs +- Which specific category rubric metrics failed + +Display to the user: +``` +Category baseline: [N] failures, [M] warnings ([category] rubric) +``` + +If BOTH static and category baselines are 0 FAILs and 0 WARNs, stop: +"This skill already passes all static and category checks. No improvements needed." + +--- + +## Phase 3: Diagnose + +Read the full skill file at `.claude/skills/[name]/SKILL.md`. + +For each failing or warning **static** check, identify the exact gap: + +- **Check 1 fail** → which frontmatter field is missing +- **Check 2 fail** → how many phases found vs. minimum required +- **Check 3 fail** → no verdict keywords anywhere in the skill body +- **Check 4 fail** → Write or Edit in allowed-tools but no ask-before-write language +- **Check 5 warn** → no follow-up or next-step section at the end +- **Check 6 warn** → `context: fork` set but fewer than 5 phases found +- **Check 7 warn** → argument-hint is empty or doesn't match documented modes + +For each failing or warning **category** check (if category was assigned in Phase 2b), +identify the exact gap in the skill's text. 
For example: +- If G2 fails (gate mode, full directors not spawned): skill body never references all 4 + PHASE-GATE director prompts +- If A2 fails (authoring, no per-section May-I-write): skill asks once at the end, not + before each section write +- If T3 fails (team, BLOCKED not surfaced): skill doesn't halt dependent work on blocked agent + +Show the full combined diagnosis to the user before proposing any changes. + +--- + +## Phase 4: Propose Fix + +Write a targeted fix for each failure and warning. Show the proposed changes +as clearly marked before/after blocks. Only change what is failing — do not +rewrite sections that are passing. + +Ask: "May I write this improved version to `.claude/skills/[name]/SKILL.md`?" + +If the user says no, stop here. + +--- + +## Phase 5: Write and Retest + +Record the current content of the skill file (for revert if needed). + +Write the improved skill to `.claude/skills/[name]/SKILL.md`. + +Re-run `/skill-test static [name]` and record the new static score. +If a category was assigned, also re-run `/skill-test category [name]` and record the new category score. + +Display the comparison: +``` +Static: Before [N] failures, [M] warnings → After [N'] failures, [M'] warnings +Category: Before [N] failures, [M] warnings → After [N'] failures, [M'] warnings (if applicable) +Combined change: improved / no change / worse +``` + +--- + +## Phase 6: Verdict + +Count the combined failure total: static FAILs + category FAILs + static WARNs + category WARNs. + +**If combined score improved (combined failure count is lower than baseline):** +Report: "Score improved. Changes kept." +Show a summary of what was fixed in each dimension. + +**If combined score is the same or worse:** +Report: "Combined score did not improve." +Show what changed and why it may not have helped. +Ask: "May I revert `.claude/skills/[name]/SKILL.md` using git checkout?" 
+If yes: run `git checkout -- .claude/skills/[name]/SKILL.md`
+
+---
+
+## Phase 7: Next Steps
+
+- Run `/skill-test static all` to find the next skill with failures.
+- Run `/skill-improve [next-name]` to continue the loop on another skill.
+- Run `/skill-test audit` to see overall coverage progress.
diff --git a/.omc/skills/skill-test/SKILL.md b/.omc/skills/skill-test/SKILL.md
new file mode 100644
index 0000000..07ba49d
--- /dev/null
+++ b/.omc/skills/skill-test/SKILL.md
@@ -0,0 +1,356 @@
+---
+name: skill-test
+description: "Validate skill files for structural compliance and behavioral correctness. Four modes: static (linter), spec (behavioral), category (rubric), audit (coverage report)."
+argument-hint: "static [skill-name | all] | spec [skill-name] | category [skill-name | all] | audit"
+user-invocable: true
+allowed-tools: Read, Glob, Grep, Write
+---
+
+# Skill Test
+
+Validates `.claude/skills/*/SKILL.md` files for structural compliance and
+behavioral correctness. No external dependencies — runs entirely within the
+existing skill/hook/template architecture.
+ +**Four modes:** + +| Mode | Command | Purpose | Token Cost | +|------|---------|---------|------------| +| `static` | `/skill-test static [name\|all]` | Structural linter — 7 compliance checks per skill | Low (~1k/skill) | +| `spec` | `/skill-test spec [name]` | Behavioral verifier — evaluates assertions in test spec | Medium (~5k/skill) | +| `category` | `/skill-test category [name\|all]` | Category rubric — checks skill against its category-specific metrics | Low (~2k/skill) | +| `audit` | `/skill-test audit` | Coverage report — skills, agent specs, last test dates | Low (~3k total) | + +--- + +## Phase 1: Parse Arguments + +Determine mode from the first argument: + +- `static [name]` → run 7 structural checks on one skill +- `static all` → run 7 structural checks on all skills (Glob `.claude/skills/*/SKILL.md`) +- `spec [name]` → read skill + test spec, evaluate assertions +- `category [name]` → run category-specific rubric from `CCGS Skill Testing Framework/quality-rubric.md` +- `category all` → run category rubric for every skill that has a `category:` in catalog +- `audit` (or no argument) → read catalog, list all skills and agents, show coverage + +If argument is missing or unrecognized, output usage and stop. + +--- + +## Phase 2A: Static Mode — Structural Linter + +For each skill being tested, read its `SKILL.md` fully and run all 7 checks: + +### Check 1 — Required Frontmatter Fields +The file must contain all of these in the YAML frontmatter block: +- `name:` +- `description:` +- `argument-hint:` +- `user-invocable:` +- `allowed-tools:` + +**FAIL** if any are absent. + +### Check 2 — Multiple Phases +The skill must have ≥2 numbered phase headings. Look for patterns like: +- `## Phase N` or `## Phase N:` +- `## N.` (numbered top-level sections) +- At least 2 distinct `##` headings if phases aren't explicitly numbered + +**FAIL** if fewer than 2 phase-like headings are found. 
+ +### Check 3 — Verdict Keywords +The skill must contain at least one of: `PASS`, `FAIL`, `CONCERNS`, `APPROVED`, +`BLOCKED`, `COMPLETE`, `READY`, `COMPLIANT`, `NON-COMPLIANT` + +**FAIL** if none are present. + +### Check 4 — Collaborative Protocol Language +The skill must contain ask-before-write language. Look for: +- `"May I write"` (canonical form) +- `"before writing"` or `"approval"` near file-write instructions +- `"ask"` + `"write"` in close proximity (within same section) + +**WARN** if absent (some read-only skills legitimately skip this). +**FAIL** if `allowed-tools` includes `Write` or `Edit` but no ask-before-write language is found. + +### Check 5 — Next-Step Handoff +The skill must end with a recommended next action or follow-up path. Look for: +- A final section mentioning another skill (e.g., `/story-done`, `/gate-check`) +- "Recommended next" or "next step" phrasing +- A "Follow-Up" or "After this" section + +**WARN** if absent. + +### Check 6 — Fork Context Complexity +If frontmatter contains `context: fork`, the skill should have ≥5 phase headings +(`##` level or numbered Phase N headers). Fork context is for complex multi-phase +skills; simple skills should not use it. + +**WARN** if `context: fork` is set but fewer than 5 phases found. + +### Check 7 — Argument Hint Plausibility +`argument-hint` must be non-empty. If the skill body mentions multiple modes +(e.g., "Mode A | Mode B"), the hint should reflect them. Cross-reference the +hint against the first phase's "Parse Arguments" section. + +**WARN** if hint is `""` or if documented modes don't match hint. 
+ +--- + +### Static Mode Output Format + +For a single skill: +``` +=== Skill Static Check: /[name] === + +Check 1 — Frontmatter Fields: PASS +Check 2 — Multiple Phases: PASS (7 phases found) +Check 3 — Verdict Keywords: PASS (PASS, FAIL, CONCERNS) +Check 4 — Collaborative Protocol: PASS ("May I write" found) +Check 5 — Next-Step Handoff: WARN (no follow-up section found) +Check 6 — Fork Context Complexity: PASS (8 phases, context: fork set) +Check 7 — Argument Hint: PASS + +Verdict: WARNINGS (1 warning, 0 failures) +Recommended: Add a "Follow-Up Actions" section at the end of the skill. +``` + +For `static all`, produce a summary table then list any non-compliant skills: +``` +=== Skill Static Check: All 52 Skills === + +Skill | Result | Issues +-----------------------|--------------|------- +gate-check | COMPLIANT | +design-review | COMPLIANT | +story-readiness | WARNINGS | Check 5: no handoff +... + +Summary: 48 COMPLIANT, 3 WARNINGS, 1 NON-COMPLIANT +Aggregate Verdict: N WARNINGS / N FAILURES +``` + +--- + +## Phase 2B: Spec Mode — Behavioral Verifier + +### Step 1 — Locate Files + +Find skill at `.claude/skills/[name]/SKILL.md`. +Look up the spec path from `CCGS Skill Testing Framework/catalog.yaml` — use the +`spec:` field for the matching skill entry. + +If either is missing: +- Missing skill: "Skill '[name]' not found in `.claude/skills/`." +- Missing spec path in catalog: "No spec path set for '[name]' in catalog.yaml." +- Spec file not found at path: "Spec file missing at [path]. Run `/skill-test audit` + to see coverage gaps." + +### Step 2 — Read Both Files + +Read the skill file and test spec file completely. + +### Step 3 — Evaluate Assertions + +For each **Test Case** in the spec: + +1. Read the **Fixture** description (assumed state of project files) +2. Read the **Expected behavior** steps +3. 
Read each **Assertion** checkbox + +For each assertion, evaluate whether the skill's written instructions, if +followed correctly given the fixture state, would satisfy it. This is a +Claude-evaluated reasoning check, not code execution. + +Mark each assertion: +- **PASS** — skill instructions clearly satisfy this assertion +- **PARTIAL** — skill instructions partially address it, but with ambiguity +- **FAIL** — skill instructions would NOT satisfy this assertion given the fixture + +For **Protocol Compliance** assertions (always present): +- Check whether the skill requires "May I write" before file writes +- Check whether the skill presents findings before requesting approval +- Check whether the skill ends with a recommended next step +- Check whether the skill avoids auto-creating files without approval + +### Step 4 — Build Report + +``` +=== Skill Spec Test: /[name] === +Date: [date] +Spec: CCGS Skill Testing Framework/skills/[category]/[name].md + +Case 1: [Happy Path — name] + Fixture: [summary] + Assertions: + [PASS] [assertion text] + [FAIL] [assertion text] + Reason: The skill's Phase 3 says "..." but the fixture state means "..." + Case Verdict: FAIL + +Case 2: [Edge Case — name] + ... + Case Verdict: PASS + +Protocol Compliance: + [PASS] Uses "May I write" before file writes + [PASS] Presents findings before asking approval + [WARN] No explicit next-step handoff at end + +Overall Verdict: FAIL (1 case failed, 1 warning) +``` + +### Step 5 — Offer to Write Results + +"May I write these results to `CCGS Skill Testing Framework/results/skill-test-spec-[name]-[date].md` +and update `CCGS Skill Testing Framework/catalog.yaml`?" 
+ +If yes: +- Write results file to `CCGS Skill Testing Framework/results/` +- Update the skill's entry in `CCGS Skill Testing Framework/catalog.yaml`: + - `last_spec: [date]` + - `last_spec_result: PASS|PARTIAL|FAIL` + +--- + +## Phase 2D: Category Mode — Rubric Evaluation + +### Step 1 — Locate Skill and Category + +Find skill at `.claude/skills/[name]/SKILL.md`. +Look up `category:` field in `CCGS Skill Testing Framework/catalog.yaml`. + +If skill not found: "Skill '[name]' not found." +If no `category:` field: "No category assigned for '[name]' in catalog.yaml. +Add `category: [name]` to the skill entry first." + +For `category all`: collect all skills with a `category:` field and process each. +`category: utility` skills are evaluated against U1 (static checks pass) and U2 +(gate mode correct if applicable) only — skip to the static mode for U1. + +### Step 2 — Read Rubric Section + +Read `CCGS Skill Testing Framework/quality-rubric.md`. +Extract the section matching the skill's category (e.g., `### gate`, `### team`). + +### Step 3 — Read Skill + +Read the skill's `SKILL.md` fully. + +### Step 4 — Evaluate Rubric Metrics + +For each metric in the category's rubric table: +1. Check whether the skill's written instructions clearly satisfy the criterion +2. Mark PASS, FAIL, or WARN +3. For FAIL/WARN, identify the exact gap in the skill text (quote the relevant section + or note its absence) + +### Step 5 — Output Report + +``` +=== Skill Category Check: /[name] ([category]) === + +Metric G1 — Review mode read: PASS +Metric G2 — Full mode directors: FAIL + Gap: Phase 3 spawns only CD-PHASE-GATE; TD-PHASE-GATE, PR-PHASE-GATE, AD-PHASE-GATE absent +Metric G3 — Lean mode: PHASE-GATE only: PASS +Metric G4 — Solo mode: no directors: PASS +Metric G5 — No auto-advance: PASS + +Verdict: FAIL (1 failure, 0 warnings) +Fix: Add TD-PHASE-GATE, PR-PHASE-GATE, and AD-PHASE-GATE to the full-mode director + panel in Phase 3. 
+``` + +### Step 6 — Offer to Update Catalog + +"May I update `CCGS Skill Testing Framework/catalog.yaml` to record this category check +(`last_category`, `last_category_result`) for [name]?" + +--- + +## Phase 2C: Audit Mode — Coverage Report + +### Step 1 — Read Catalog + +Read `CCGS Skill Testing Framework/catalog.yaml`. If missing, note that catalog doesn't exist +yet (first-run state). + +### Step 2 — Enumerate All Skills and Agents + +Glob `.claude/skills/*/SKILL.md` to get the complete list of skills. +Extract skill name from each path (directory name). + +Also read the `agents:` section from `CCGS Skill Testing Framework/catalog.yaml` to get the +complete list of agents. + +### Step 3 — Build Skill Coverage Table + +For each skill: +- Check if a spec file exists (use the `spec:` path from catalog, or glob `CCGS Skill Testing Framework/skills/*/[name].md`) +- Look up `last_static`, `last_static_result`, `last_spec`, `last_spec_result`, + `last_category`, `last_category_result`, `category` from catalog (or mark as + "never" / "—" if not in catalog) +- Priority comes from catalog `priority:` field (critical/high/medium/low) + +### Step 3b — Build Agent Coverage Table + +For each agent in catalog's `agents:` section: +- Check if a spec file exists (use the `spec:` path from catalog, or glob `CCGS Skill Testing Framework/agents/*/[name].md`) +- Look up `last_spec`, `last_spec_result`, `category` from catalog + +### Step 4 — Output Report + +``` +=== Skill Test Coverage Audit === +Date: [date] + +SKILLS (72 total) +Specs written: 72 (100%) | Never static tested: 72 | Never category tested: 72 + +Skill | Cat | Has Spec | Last Static | S.Result | Last Cat | C.Result | Priority +-----------------------|----------|----------|-------------|----------|----------|----------|---------- +gate-check | gate | YES | never | — | never | — | critical +design-review | review | YES | never | — | never | — | critical +... 
+ +AGENTS (49 total) +Agent specs written: 49 (100%) + +Agent | Category | Has Spec | Last Spec | Result +-----------------------|------------|----------|-------------|-------- +creative-director | director | YES | never | — +technical-director | director | YES | never | — +... + +Top 5 Priority Gaps (skills with no spec, critical/high priority): +(none if all specs are written) + +Skill coverage: 72/72 specs (100%) +Agent coverage: 49/49 specs (100%) +``` + +No file writes in audit mode. + +Offer: "Would you like to run `/skill-test static all` to check structural +compliance across all skills? `/skill-test category all` to run category rubric +checks? Or `/skill-test spec [name]` to run a specific behavioral test?" + +--- + +## Phase 3: Recommended Next Steps + +After any mode completes, offer contextual follow-up: + +- After `static [name]`: "Run `/skill-test spec [name]` to validate behavioral + correctness if a test spec exists." +- After `static all` with failures: "Address NON-COMPLIANT skills first. Run + `/skill-test static [name]` individually for detailed remediation guidance." +- After `spec [name]` PASS: "Update `CCGS Skill Testing Framework/catalog.yaml` to record this + pass date. Consider running `/skill-test audit` to find the next spec gap." +- After `spec [name]` FAIL: "Review the failing assertions and update the skill + or the test spec to resolve the mismatch." +- After `audit`: "Start with the critical-priority gaps. Use the spec template + at `CCGS Skill Testing Framework/templates/skill-test-spec.md` to create new specs." diff --git a/.omc/skills/smoke-check/SKILL.md b/.omc/skills/smoke-check/SKILL.md new file mode 100644 index 0000000..6cb1932 --- /dev/null +++ b/.omc/skills/smoke-check/SKILL.md @@ -0,0 +1,417 @@ +--- +name: smoke-check +description: "Run the critical path smoke test gate before QA hand-off. Executes the automated test suite, verifies core functionality, and produces a PASS/FAIL report. 
Run after a sprint's stories are implemented and before manual QA begins. A failed smoke check means the build is not ready for QA." +argument-hint: "[sprint | quick | --platform pc|console|mobile|all]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write, AskUserQuestion +--- + +# Smoke Check + +This skill is the gate between "implementation done" and "ready for QA +hand-off". It runs the automated test suite, checks for test coverage gaps, +batch-verifies critical paths with the developer, and produces a PASS/FAIL +report. + +The rule is simple: **a build that fails smoke check does not go to QA.** +Handing a broken build to QA wastes their time and demoralises the team. + +**Output:** `production/qa/smoke-[date].md` + +--- + +## Parse Arguments + +Arguments can be combined: `/smoke-check sprint --platform console` + +**Base mode** (first argument, default: `sprint`): +- `sprint` — full smoke check against the current sprint's stories +- `quick` — skip coverage scan (Phase 3) and Batch 3; use for rapid re-checks + +**Platform flag** (`--platform`, default: none): +- `--platform pc` — add PC-specific checks (keyboard, mouse, windowed mode) +- `--platform console` — add console-specific checks (gamepad, TV safe zones, + platform certification requirements) +- `--platform mobile` — add mobile-specific checks (touch, portrait/landscape, + battery/thermal behaviour) +- `--platform all` — add all platform variants; output per-platform verdict table + +If `--platform` is provided, Phase 4 adds platform-specific batches and +Phase 5 outputs a per-platform verdict table in addition to the overall verdict. + +--- + +## Phase 1: Detect Test Setup + +Before running anything, understand the environment: + +1. **Test framework check**: verify `tests/` directory exists. + If it does not: "No test directory found at `tests/`. Run `/test-setup` + to scaffold the testing infrastructure, or create the directory manually + if tests live elsewhere." Then stop. + +2. 
**CI check**: check whether `.github/workflows/` contains a workflow file + referencing tests. Note in the report whether CI is configured. + +3. **Engine detection**: read `.claude/docs/technical-preferences.md` and + extract the `Engine:` value. Store this for test command selection in + Phase 2. + +4. **Smoke test list**: check whether `production/qa/smoke-tests.md` or + `tests/smoke/` exists. If a smoke test list is found, load it for use in + Phase 4. If neither exists, smoke tests will be drawn from the current QA + plan (Phase 4 fallback). + +5. **QA plan check**: glob `production/qa/qa-plan-*.md` and take the most + recently modified file. If found, note the path — it will be used in + Phase 3 and Phase 4. If not found, note: "No QA plan found. Run + `/qa-plan sprint` before smoke-checking for best results." + +Report findings before proceeding: "Environment: [engine]. Test directory: +[found / not found]. CI configured: [yes / no]. QA plan: [path / not found]." + +--- + +## Phase 2: Run Automated Tests + +Attempt to run the test suite via Bash. Select the command based on the engine +detected in Phase 1: + +**Godot 4:** +```bash +godot --headless --script tests/gdunit4_runner.gd 2>&1 +``` +If the GDUnit4 runner script does not exist at that path, try: +```bash +godot --headless -s addons/gdunit4/GdUnitRunner.gd 2>&1 +``` +If neither path exists, note: "GDUnit4 runner not found — confirm the runner +path for your test framework." + +**Unity:** +Unity tests require the editor and cannot be run headlessly via shell in most +environments. Check for recent test result artifacts: +```bash +ls -t test-results/ 2>/dev/null | head -5 +``` +If test result files exist (XML or JSON), read the most recent one and parse +PASS/FAIL counts. If no artifacts exist: "Unity tests must be run from the +editor or CI pipeline. Please confirm test status manually before proceeding." 
+ +**Unreal Engine:** +```bash +ls -t Saved/Logs/ 2>/dev/null | grep -i "test\|automation" | head -5 +``` +If no matching log found: "UE automation tests must be run via the Session +Frontend or CI pipeline. Please confirm test status manually." + +**Unknown engine / not configured:** +"Engine not configured in `.claude/docs/technical-preferences.md`. Run +`/setup-engine` to specify the engine, then re-run `/smoke-check`." + +**If the test runner is not available in this environment** (engine binary not +on PATH, runner script not found, etc.), report clearly: + +"Automated tests could not be executed — engine binary not found on PATH. +Status will be recorded as NOT RUN. Confirm test results from your local IDE +or CI pipeline. Unconfirmed NOT RUN is treated as PASS WITH WARNINGS, not +FAIL — the developer must manually confirm results." + +Do not treat NOT RUN as an automatic FAIL. Record it as a warning. The +developer's manual confirmation in Phase 4 can resolve it. + +Parse runner output and extract: +- Total tests run +- Passing count +- Failing count +- Names of any failing tests (up to 10; if more, note the count) +- Any crash or error output from the runner itself + +--- + +## Phase 3: Check Test Coverage + +Draw the story list from, in priority order: +1. The QA plan found in Phase 1 (its Test Summary table lists expected test + file paths per story) +2. The current sprint plan from `production/sprints/` (most recently modified + file) +3. If the `quick` argument was passed, skip this phase entirely and note: + "Coverage scan skipped — run `/smoke-check sprint` for full coverage + analysis." + +For each story in scope: + +1. Extract the system slug from the story's file path + (e.g., `production/epics/combat/story-001.md` → `combat`) +2. Glob `tests/unit/[system]/` and `tests/integration/[system]/` for files + whose name contains the story slug or a closely related term +3. 
Check the story file itself for a `Test file:` header field or a + "Test Evidence" section + +Assign a coverage status to each story: + +| Status | Meaning | +|--------|---------| +| **COVERED** | A test file was found matching this story's system and scope | +| **MANUAL** | Story type is Visual/Feel or UI; a test evidence document was found | +| **MISSING** | Logic or Integration story with no matching test file | +| **EXPECTED** | Config/Data story — no test file required; spot-check is sufficient | +| **UNKNOWN** | Story file missing or unreadable | + +MISSING entries are advisory gaps. They do not cause a FAIL verdict but must +appear prominently in the report and must be resolved before `/story-done` can +fully close those stories. + +--- + +## Phase 4: Run Manual Smoke Checks + +Draw the smoke test checklist from, in priority order: +1. The QA plan's "Smoke Test Scope" section (if QA plan was found in Phase 1) +2. `production/qa/smoke-tests.md` (if it exists) +3. `tests/smoke/` directory contents (if it exists) +4. The standard fallback list below (used only when none of the above exist) + +Tailor batches 2 and 3 to the actual systems identified from the sprint or QA +plan. Replace bracketed placeholders with real mechanic names from the current +sprint's stories. + +Use `AskUserQuestion` to batch-verify. Keep to at most 3 calls. + +**Batch 1 — Core stability (always run):** +``` +question: "Smoke check — Batch 1: Core stability. 
Please verify each:" +options: + - "Game launches to main menu without crash — PASS" + - "Game launches to main menu without crash — FAIL" + - "New game / session starts successfully — PASS" + - "New game / session starts successfully — FAIL" + - "Main menu responds to all inputs — PASS" + - "Main menu responds to all inputs — FAIL" +``` + +**Batch 2 — Sprint mechanic and regression (always run):** +``` +question: "Smoke check — Batch 2: This sprint's changes and regression check:" +options: + - "[Primary mechanic this sprint] — PASS" + - "[Primary mechanic this sprint] — FAIL: [describe what broke]" + - "[Second notable change this sprint, if any] — PASS" + - "[Second notable change this sprint] — FAIL" + - "Previous sprint's features still work (no regressions) — PASS" + - "Previous sprint's features — regression found: [brief description]" +``` + +**Batch 3 — Data integrity and performance (run unless `quick` argument):** +``` +question: "Smoke check — Batch 3: Data integrity and performance:" +options: + - "Save / load completes without data loss — PASS" + - "Save / load — FAIL: [describe what broke]" + - "Save / load — N/A (save system not yet implemented)" + - "No new frame rate drops or hitches observed — PASS" + - "Frame rate drops or hitches found — FAIL: [where]" + - "Performance — not checked in this session" +``` + +Record each response verbatim for the Phase 5 report. 
+ +**Platform Batches** *(run only if `--platform` argument was provided)*: + +**PC platform** (`--platform pc` or `--platform all`): +``` +question: "Smoke check — PC Platform: Verify platform-specific behaviour:" +options: + - "Keyboard controls work correctly across all menus and gameplay — PASS" + - "Keyboard controls — FAIL: [describe issue]" + - "Mouse input and cursor visibility correct in all states — PASS" + - "Mouse input — FAIL: [describe issue]" + - "Windowed and fullscreen modes function without graphical issues — PASS" + - "Windowed/fullscreen — FAIL: [describe issue]" + - "Resolution changes apply correctly — PASS" + - "Resolution changes — FAIL: [describe issue]" +``` + +**Console platform** (`--platform console` or `--platform all`): +``` +question: "Smoke check — Console Platform: Verify platform-specific behaviour:" +options: + - "Gamepad input works correctly for all actions — PASS" + - "Gamepad input — FAIL: [describe issue]" + - "UI fits within TV safe zone margins (no text clipped) — PASS" + - "TV safe zone — FAIL: [describe what is clipped]" + - "No keyboard/mouse-only fallbacks shown to gamepad user — PASS" + - "Input prompt inconsistency — FAIL: [describe]" + - "Game boots correctly from cold start (no prior save) — PASS" + - "Cold start — FAIL: [describe issue]" +``` + +**Mobile platform** (`--platform mobile` or `--platform all`): +``` +question: "Smoke check — Mobile Platform: Verify platform-specific behaviour:" +options: + - "Touch controls work correctly for all primary actions — PASS" + - "Touch controls — FAIL: [describe issue]" + - "Game handles orientation change (portrait ↔ landscape) correctly — PASS" + - "Orientation change — FAIL: [describe what breaks]" + - "Background / foreground transitions (home button) handled gracefully — PASS" + - "Background/foreground — FAIL: [describe issue]" + - "No visible performance issues on target device (no thermal throttling signs) — PASS" + - "Mobile performance — FAIL: [describe issue]" 
+``` + +--- + +## Phase 5: Generate Report + +Assemble the full smoke check report: + +````markdown +## Smoke Check Report +**Date**: [date] +**Sprint**: [sprint name / number, or "Not identified"] +**Engine**: [engine] +**QA Plan**: [path, or "Not found — run /qa-plan first"] +**Argument**: [sprint | quick | blank] + +--- + +### Automated Tests + +**Status**: [PASS ([N] tests, [N] passing) | FAIL ([N] failures) | +NOT RUN ([reason])] + +[If FAIL, list failing tests:] +- `[test name]` — [brief failure description from runner output] + +[If NOT RUN:] +"Manual confirmation required: did tests pass in your local IDE or CI? This +will determine whether the automated test row contributes to a FAIL verdict." + +--- + +### Test Coverage + +| Story | Type | Test File | Coverage Status | +|-------|------|-----------|----------------| +| [title] | Logic | `tests/unit/[system]/[slug]_test.[ext]` | COVERED | +| [title] | Visual/Feel | `tests/evidence/[slug]-screenshots.md` | MANUAL | +| [title] | Logic | — | MISSING ⚠ | +| [title] | Config/Data | — | EXPECTED | + +**Summary**: [N] covered, [N] manual, [N] missing, [N] expected. + +--- + +### Manual Smoke Checks + +- [x] Game launches without crash — PASS +- [x] New game starts — PASS +- [x] [Core mechanic] — PASS +- [ ] [Other check] — FAIL: [user's description] +- [x] Save / load — PASS +- [-] Performance — not checked this session + +--- + +### Missing Test Evidence + +Stories that must have test evidence before they can be marked COMPLETE via +`/story-done`: + +- **[story title]** (`[path]`) — Logic story has no test file. + Expected location: `tests/unit/[system]/[story-slug]_test.[ext]` + +[If none:] "All Logic and Integration stories have test coverage." 
+ +--- + +### Platform-Specific Results *(only if `--platform` was provided)* + +| Platform | Checks Run | Passed | Failed | Platform Verdict | +|----------|-----------|--------|--------|-----------------| +| PC | [N] | [N] | [N] | PASS / FAIL | +| Console | [N] | [N] | [N] | PASS / FAIL | +| Mobile | [N] | [N] | [N] | PASS / FAIL | + +**Platform notes**: [any platform-specific observations not captured in pass/fail] + +Any platform with one or more FAIL checks contributes to the overall FAIL verdict. + +--- + +### Verdict: [PASS | PASS WITH WARNINGS | FAIL] + +[Verdict rules — first matching rule wins:] + +**FAIL** if ANY of: +- Automated test suite ran and reported one or more test failures +- Any Batch 1 (core stability) check returned FAIL +- Any Batch 2 (primary sprint mechanic or regression check) returned FAIL + +**PASS WITH WARNINGS** if ALL of: +- Automated tests PASS or NOT RUN (developer has not yet confirmed) +- All Batch 1 and Batch 2 smoke checks PASS +- One or more Logic/Integration stories have MISSING test evidence + +**PASS** if ALL of: +- Automated tests PASS +- All smoke checks in all batches PASS or N/A +- No MISSING test evidence entries +```` + +--- + +## Phase 6: Write and Gate + +Present the full report in conversation, then ask: + +"May I write this smoke check report to `production/qa/smoke-[date].md`?" + +Write only after approval. + +After writing, deliver the gate verdict: + +**If verdict is FAIL:** + +"The smoke check failed. Do not hand off to QA until these failures are +resolved: + +[List each failing automated test or smoke check with a one-line description] + +Fix the failures and run `/smoke-check` again to re-gate before QA hand-off." + +**If verdict is PASS WITH WARNINGS:** + +"Smoke check passed with warnings. The build is ready for manual QA. 
+ +Advisory items to resolve before running `/story-done` on affected stories: +[list MISSING test evidence entries] + +QA hand-off: share `production/qa/qa-plan-[sprint].md` with the qa-tester +agent to begin manual verification." + +**If verdict is PASS:** + +"Smoke check passed cleanly. The build is ready for manual QA. + +QA hand-off: share `production/qa/qa-plan-[sprint].md` with the qa-tester +agent to begin manual verification." + +--- + +## Collaborative Protocol + +- **Never treat NOT RUN as automatic FAIL** — record it as NOT RUN and let + the developer confirm status manually. Unconfirmed NOT RUN contributes to + PASS WITH WARNINGS, not FAIL. +- **Never auto-fix failures** — report them and state what must be resolved. + Do not attempt to edit source code or test files. +- **PASS WITH WARNINGS does not block QA hand-off** — it records advisory + gaps for `/story-done` to follow up on. +- **`quick` argument** skips Phase 3 (coverage scan) and Phase 4 Batch 3. + Use it for rapid re-checks after fixing a specific failure. +- Use `AskUserQuestion` for all manual smoke check verification. +- **Never write the report without asking** — Phase 6 requires explicit + approval before any file is created. diff --git a/.omc/skills/soak-test/SKILL.md b/.omc/skills/soak-test/SKILL.md new file mode 100644 index 0000000..389f402 --- /dev/null +++ b/.omc/skills/soak-test/SKILL.md @@ -0,0 +1,283 @@ +--- +name: soak-test +description: "Generate a soak test protocol for extended play sessions. Defines what to observe, measure, and log during long play sessions to surface slow leaks, fatigue effects, and edge cases that only appear after sustained play. Primarily used in Polish and Release phases." 
+argument-hint: "[duration: 30m | 1h | 2h | 4h] [focus: memory | stability | balance | all]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +--- + +# Soak Test + +A soak test (also called an endurance test) is an extended play session run +with specific observation goals. Unlike a smoke check (broad critical path, +~10 min) or a single-feature playtest (~30 min), a soak test runs for **30 +minutes to several hours** to surface: + +- **Memory leaks** — gradual heap growth that only appears after scene transitions +- **Performance drift** — frame time degradation that worsens over time +- **State accumulation bugs** — issues that only appear after N repetitions + of a mechanic (inventory full, score overflow, AI state corruption) +- **Fun fatigue** — mechanics that feel good in a first session but grow + repetitive over extended play +- **Content exhaustion** — the point where players run out of novel content + +**This skill generates the observation protocol and analysis harness — the +human does the actual playing.** + +**Output:** `production/qa/soak-test-[date]-[duration].md` + +**When to run:** +- Polish phase — before `/gate-check release` +- After fixing a memory or stability issue (regression soak) +- When extended play has not been formally tracked + +--- + +## 1. Parse Arguments + +**Duration** (default: `1h`): +- `30m` — short soak; suitable for testing a single mechanic or scene +- `1h` — standard soak; covers most common leak categories +- `2h` — extended soak; recommended for first full Polish soak +- `4h` — deep soak; required for games with long session design (RPGs, sims) + +**Focus** (default: `all`): +- `memory` — focus on heap size, object count, leak patterns +- `stability` — focus on crash/freeze/hang detection +- `balance` — focus on fun fatigue, content exhaustion, difficulty perception +- `all` — all of the above + +--- + +## 2. 
Load Context + +Read: +- `.claude/docs/technical-preferences.md` — engine (for engine-specific memory + monitoring guidance), performance budgets (memory ceiling, target FPS) +- `design/gdd/game-concept.md` — intended session length (for comparison against + soak duration), core loop description +- Most recent file in `production/playtests/` — prior playtest findings + (to avoid re-documenting known issues) +- Most recent file in `production/qa/qa-plan-*.md` — current sprint test coverage + (to understand what has been formally tested vs. what the soak covers) + +Note any performance budget targets from technical-preferences.md: +- Memory ceiling: [N MB, or "not set"] +- Target FPS: [N, or "not set"] +- Frame budget: [N ms, or "not set"] + +--- + +## 3. Define Observation Checkpoints + +Based on duration, generate timed checkpoints: + +**30m soak**: T+0, T+10, T+20, T+30 +**1h soak**: T+0, T+15, T+30, T+45, T+60 +**2h soak**: T+0, T+20, T+40, T+60, T+80, T+100, T+120 +**4h soak**: T+0, T+30, T+60, T+90, T+120, T+180, T+240 + +At each checkpoint, the observer records the observation items defined in +Phase 4. + +--- + +## 4. 
Generate the Soak Test Protocol + +### Memory / Stability observation items (if focus = memory or all) + +Engine-specific monitoring guidance: + +**Godot 4:** +- Open Debugger → Monitors tab; track `Memory → Static Memory` and + `Object Count → Objects` across checkpoints +- Record: Static Memory (KB), Object Count, Orphan Nodes count +- Alert threshold: Memory growth > 20% from T+0 after the first 15 minutes + (some growth on load is expected; sustained growth indicates a leak) +- Note: `Performance.get_monitor(Performance.MEMORY_STATIC)` returns bytes + in Godot 4.6 + +**Unity:** +- Open Memory Profiler (Window → Analysis → Memory Profiler) +- Record: Total Reserved Memory (MB), GC Allocated (MB), Object Count at each checkpoint +- Alert threshold: GC Allocated growing monotonically across 3+ checkpoints + +**Unreal Engine:** +- Use `stat memory` console command at each checkpoint +- Record: Physical Memory Used (MB), Physical Memory Available +- Alert threshold: Physical Memory Used growth > 50MB over the full soak + +### Stability observation items (if focus = stability or all) + +At each checkpoint, note: +- [ ] No crash, hang, or freeze occurred since last checkpoint +- [ ] Frame rate still within target budget ([target FPS] fps) +- [ ] Audio still playing correctly (no desync or silence) +- [ ] All HUD elements still rendering correctly +- [ ] Input responding as expected (no input loss or lag spike) + +### Balance / fatigue observation items (if focus = balance or all) + +Collect subjective observations at each checkpoint: +- [ ] Core mechanic still feels rewarding (Y/N) +- [ ] Perceived difficulty level: [too easy / appropriate / too hard] +- [ ] Any "I've seen this before" moments since last checkpoint? (novel content exhaustion) +- [ ] Any moment of frustration since last checkpoint? Note cause. +- [ ] Any moment of peak engagement since last checkpoint? Note cause. + +--- + +## 5. 
Generate the Protocol Document + +```markdown +# Soak Test Protocol + +> **Date**: [date] +> **Duration**: [duration] +> **Focus**: [memory | stability | balance | all] +> **Engine**: [engine] +> **Generated by**: /soak-test + +--- + +## Pre-Session Setup + +Before starting the soak: + +- [ ] Game is running from a **fresh launch** (not resumed from a prior session) +- [ ] All background applications closed (minimise OS memory interference) +- [ ] Performance monitoring tool open and recording: + - **Godot**: Debugger → Monitors tab → Memory section visible + - **Unity**: Memory Profiler window open + - **Unreal**: `stat memory` ready in console +- [ ] Soak target confirmed: [session design intent from game concept] +- [ ] Prior known issues to watch for: [from most recent playtest / qa-plan] + +--- + +## Baseline (T+0) — Record Before Playing + +| Metric | Baseline Value | +|--------|---------------| +| Memory / Heap | [record before first frame of gameplay] | +| Object Count | [record] | +| FPS (first 30 seconds) | [record] | +| [Engine-specific metric] | [record] | + +--- + +## Checkpoint Log + +### T+[N] minutes + +**Memory / Stability** *(if applicable)*: + +| Metric | Value | Δ from Baseline | Alert? 
| +|--------|-------|-----------------|--------| +| Memory / Heap | | | | +| Object Count | | | | +| FPS | | | | +| Crashes / Hangs | | | | + +**Stability checks**: +- [ ] No crash or hang since last checkpoint +- [ ] Frame rate within budget ([N] fps target) +- [ ] Audio correct +- [ ] HUD rendering correctly +- [ ] Input responding correctly + +**Balance / Fatigue** *(if applicable)*: +- Core mechanic still rewarding: Y / N +- Difficulty perception: too easy / appropriate / too hard +- Notable moments: [note any peak engagement or frustration] +- Content exhaustion signs: Y / N — [describe] + +**Free observations**: +*(Note anything unexpected observed since the last checkpoint)* + +--- + +[Repeat Checkpoint Log section for each timed checkpoint] + +--- + +## Post-Session Analysis + +### Memory Trend + +| Checkpoint | Memory | Δ/hr extrapolated | +|------------|--------|-------------------| +| T+0 | | | +| [T+N] | | | + +**Leak detected?** Y / N +**Estimated time to OOM at current rate**: [N hours / not applicable] + +### Stability Summary + +Total crashes: [N] +Total hangs: [N] +Worst FPS observed: [N] fps at [checkpoint] +Performance degradation: stable / mild / severe + +### Balance / Fatigue Summary + +Fun curve: [engaged throughout / fatigue onset at T+N / repetitive from start] +Content exhaustion point: [never / at T+N / early] +Difficulty arc: [appropriate / too easy throughout / difficulty spike at T+N] + +### Issues Found + +| ID | Severity | Checkpoint | Description | +|----|----------|------------|-------------| +| SOAK-001 | S[1-4] | T+[N] | [description] | + +--- + +## Verdict: PASS / PASS WITH CONCERNS / FAIL + +**PASS**: No leaks detected, stability maintained, fun factor consistent +**PASS WITH CONCERNS**: Minor drift or fatigue noted; addressable in Polish +**FAIL**: Memory leak confirmed, stability breach, or severe fun fatigue + +--- + +## Sign-Off + +- **Tester**: [name] — [date] +- **QA Lead review**: [name] — [date] +``` + +--- + +## 6. 
Write Output + +Present the protocol summary in conversation, then ask: + +"May I write this soak test protocol to +`production/qa/soak-test-[date]-[duration].md`?" + +Write only after approval. + +After writing: + +"Protocol written. To run the soak: +1. Open the file and follow the Pre-Session Setup checklist +2. Record each checkpoint as you play +3. Complete the Post-Session Analysis section when done +4. File bugs from 'Issues Found' to `production/qa/bugs/` +5. Run `/bug-triage sprint` after the session to integrate any S1/S2 issues + +If the verdict is FAIL, run `/smoke-check` again after fixing the issues." + +--- + +## Collaborative Protocol + +- **This skill generates a protocol — humans run it** — never attempt to + run a soak test automatically. The observations require a human observer. +- **Duration should match the game's session design** — a 5-minute game + doesn't need a 4h soak; a city-builder might. Use judgment and ask if unclear. +- **First soak should be `all` focus** — narrow focus (memory-only) is for + regression soaks after a specific fix, not the first pass +- **Ask before writing** — always confirm before creating the protocol file diff --git a/.omc/skills/sprint-plan/SKILL.md b/.omc/skills/sprint-plan/SKILL.md new file mode 100644 index 0000000..4cdff74 --- /dev/null +++ b/.omc/skills/sprint-plan/SKILL.md @@ -0,0 +1,228 @@ +--- +name: sprint-plan +description: "Generates a new sprint plan or updates an existing one based on the current milestone, completed work, and available capacity. Pulls context from production documents and design backlogs." +argument-hint: "[new|update|status] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion +context: | + !ls production/sprints/ 2>/dev/null +--- + +## Phase 0: Parse Arguments + +Extract the mode argument (`new`, `update`, or `status`) and resolve the review mode (once, store for all gate spawns this run): +1. 
If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +--- + +## Phase 1: Gather Context + +1. **Read the current milestone** from `production/milestones/`. + +2. **Read the previous sprint** (if any) from `production/sprints/` to + understand velocity and carryover. + +3. **Scan design documents** in `design/gdd/` for features tagged as ready + for implementation. + +4. **Check the risk register** at `production/risk-register/`. + +--- + +## Phase 2: Generate Output + +For `new`: + +**Generate a sprint plan** following this format and present it to the user. Do NOT ask to write yet — the producer feasibility gate (Phase 4) runs first and may require revisions before the file is written. + +```markdown +# Sprint [N] -- [Start Date] to [End Date] + +## Sprint Goal +[One sentence describing what this sprint achieves toward the milestone] + +## Capacity +- Total days: [X] +- Buffer (20%): [Y days reserved for unplanned work] +- Available: [Z days] + +## Tasks + +### Must Have (Critical Path) +| ID | Task | Agent/Owner | Est. Days | Dependencies | Acceptance Criteria | +|----|------|-------------|-----------|-------------|-------------------| + +### Should Have +| ID | Task | Agent/Owner | Est. Days | Dependencies | Acceptance Criteria | +|----|------|-------------|-----------|-------------|-------------------| + +### Nice to Have +| ID | Task | Agent/Owner | Est. 
Days | Dependencies | Acceptance Criteria | +|----|------|-------------|-----------|-------------|-------------------| + +## Carryover from Previous Sprint +| Task | Reason | New Estimate | +|------|--------|-------------| + +## Risks +| Risk | Probability | Impact | Mitigation | +|------|------------|--------|------------| + +## Dependencies on External Factors +- [List any external dependencies] + +## Definition of Done for this Sprint +- [ ] All Must Have tasks completed +- [ ] All tasks pass acceptance criteria +- [ ] QA plan exists (`production/qa/qa-plan-sprint-[N].md`) +- [ ] All Logic/Integration stories have passing unit/integration tests +- [ ] Smoke check passed (`/smoke-check sprint`) +- [ ] QA sign-off report: APPROVED or APPROVED WITH CONDITIONS (`/team-qa sprint`) +- [ ] No S1 or S2 bugs in delivered features +- [ ] Design documents updated for any deviations +- [ ] Code reviewed and merged +``` + +For `status`: + +**Generate a status report**: + +```markdown +# Sprint [N] Status -- [Date] + +## Progress: [X/Y tasks complete] ([Z%]) + +### Completed +| Task | Completed By | Notes | +|------|-------------|-------| + +### In Progress +| Task | Owner | % Done | Blockers | +|------|-------|--------|----------| + +### Not Started +| Task | Owner | At Risk? | Notes | +|------|-------|----------|-------| + +### Blocked +| Task | Blocker | Owner of Blocker | ETA | +|------|---------|-----------------|-----| + +## Burndown Assessment +[On track / Behind / Ahead] +[If behind: What is being cut or deferred] + +## Emerging Risks +- [Any new risks identified this sprint] +``` + +--- + +## Phase 3: Write Sprint Status File + +After generating a new sprint plan, also write `production/sprint-status.yaml`. +This is the machine-readable source of truth for story status — read by +`/sprint-status`, `/story-done`, and `/help` without markdown parsing. + +Ask: "May I also write `production/sprint-status.yaml` to track story status?" 
+ +Format: + +```yaml +# Auto-generated by /sprint-plan. Updated by /story-done. +# DO NOT edit manually — use /story-done to update story status. + +sprint: [N] +goal: "[sprint goal]" +start: "[YYYY-MM-DD]" +end: "[YYYY-MM-DD]" +generated: "[YYYY-MM-DD]" +updated: "[YYYY-MM-DD]" + +stories: + - id: "[epic-story, e.g. 1-1]" + name: "[story name]" + file: "[production/stories/path.md]" + priority: must-have # must-have | should-have | nice-to-have + status: ready-for-dev # backlog | ready-for-dev | in-progress | review | done | blocked + owner: "" + estimate_days: 0 + blocker: "" + completed: "" +``` + +Initialize each story from the sprint plan's task tables: +- Must Have tasks → `priority: must-have`, `status: ready-for-dev` +- Should Have tasks → `priority: should-have`, `status: backlog` +- Nice to Have tasks → `priority: nice-to-have`, `status: backlog` + +For `update`: read the existing `sprint-status.yaml`, carry over statuses for +stories that haven't changed, add new stories, remove dropped ones. + +--- + +## Phase 4: Producer Feasibility Gate + +**Review mode check** — apply before spawning PR-SPRINT: +- `solo` → skip. Note: "PR-SPRINT skipped — Solo mode." Proceed to Phase 5 (QA plan gate). +- `lean` → skip (not a PHASE-GATE). Note: "PR-SPRINT skipped — Lean mode." Proceed to Phase 5 (QA plan gate). +- `full` → spawn as normal. + +Before finalising the sprint plan, spawn `producer` via Task using gate **PR-SPRINT** (`.claude/docs/director-gates.md`). + +Pass: proposed story list (titles, estimates, dependencies), total team capacity in hours/days, any carryover from the previous sprint, milestone constraints and deadline. + +Present the producer's assessment. If UNREALISTIC, revise the story selection (defer stories to Should Have or Nice to Have) before asking for write approval. If CONCERNS, surface them and let the user decide whether to adjust. 
+ +After handling the producer's verdict, ask: "May I write this sprint plan to `production/sprints/sprint-[N].md`?" If yes, write the file, creating the directory if needed. Verdict: **COMPLETE** — sprint plan created. If no: Verdict: **BLOCKED** — user declined write. + +After writing, add: + +> **Scope check:** If this sprint includes stories added beyond the original epic scope, run `/scope-check [epic]` to detect scope creep before implementation begins. + +--- + +## Phase 5: QA Plan Gate + +Before closing the sprint plan, check whether a QA plan exists for this sprint. + +Use `Glob` to look for `production/qa/qa-plan-sprint-[N].md` or any file in `production/qa/` referencing this sprint number. + +**If a QA plan is found**: note it in the sprint plan output — "QA Plan: `[path]`" — and proceed. + +**If no QA plan exists**: do not silently proceed. Surface this explicitly: + +> "This sprint has no QA plan. A sprint plan without a QA plan means test requirements are undefined — developers won't know what 'done' looks like from a QA perspective, and the sprint cannot pass the Production → Polish gate without one. +> +> Run `/qa-plan sprint` now, before starting any implementation. It takes one session and produces the test case requirements each story needs." + +Use `AskUserQuestion`: +- Prompt: "No QA plan found for this sprint. How do you want to proceed?" +- Options: + - `[A] Run /qa-plan sprint now — I'll do that before starting implementation (Recommended)` + - `[B] Skip for now — I understand QA sign-off will be blocked at the Production → Polish gate` + +If [A]: close with "Sprint plan written. Run `/qa-plan sprint` next — then begin implementation." +If [B]: add a warning block to the sprint plan document: + +```markdown +> ⚠️ **No QA Plan**: This sprint was started without a QA plan. Run `/qa-plan sprint` +> before the last story is implemented. The Production → Polish gate requires a QA +> sign-off report, which requires a QA plan. 
+``` + +--- + +## Phase 6: Next Steps + +After the sprint plan is written and QA plan status is resolved: + +- `/qa-plan sprint` — **required before implementation begins** — defines test cases per story so developers implement against QA specs, not a blank slate +- `/story-readiness [story-file]` — validate a story is ready before starting it +- `/dev-story [story-file]` — begin implementing the first story +- `/sprint-status` — check progress mid-sprint +- `/scope-check [epic]` — verify no scope creep before implementation begins diff --git a/.omc/skills/sprint-status/SKILL.md b/.omc/skills/sprint-status/SKILL.md new file mode 100644 index 0000000..ccd4b2c --- /dev/null +++ b/.omc/skills/sprint-status/SKILL.md @@ -0,0 +1,208 @@ +--- +name: sprint-status +description: "Fast sprint status check. Reads the current sprint plan, scans story files for status, and produces a concise progress snapshot with burndown assessment and emerging risks. Run at any time during a sprint for quick situational awareness. Use when user asks 'how is the sprint going', 'sprint update', 'show sprint progress'." +argument-hint: "[sprint-number or blank for current]" +user-invocable: true +allowed-tools: Read, Glob, Grep +model: haiku +--- + +# Sprint Status + +This is a fast situational awareness check, not a sprint review. It reads the +current sprint plan and story files, scans for status markers, and produces a +concise snapshot in under 30 lines. For detailed sprint management, use +`/sprint-plan update` or `/milestone-review`. + +**This skill is read-only.** It never proposes changes, never asks to write +files, and makes at most one concrete recommendation. + +--- + +## 1. Find the Sprint + +**Argument:** `$ARGUMENTS[0]` (blank = use current sprint) + +- If an argument is given (e.g., `/sprint-status 3`), search + `production/sprints/` for a file matching `sprint-03.md`, `sprint-3.md`, + or similar. Report which file was found. 
+- If no argument is given, find the most recently modified file in + `production/sprints/` and treat it as the current sprint. +- If `production/sprints/` does not exist or is empty, report: "No sprint + files found. Start a sprint with `/sprint-plan new`." Then stop. + +Read the sprint file in full. Extract: +- Sprint number and goal +- Start date and end date +- All story or task entries with their priority (Must Have / Should Have / + Nice to Have), owner, and estimate + +--- + +## 2. Calculate Days Remaining + +Using today's date and the sprint end date from the sprint file, calculate: +- Total sprint days (end minus start) +- Days elapsed +- Days remaining +- Percentage of time consumed + +If the sprint file does not include explicit dates, note "Sprint dates not +found — burndown assessment skipped." + +--- + +## 3. Scan Story Status + +**First: check for `production/sprint-status.yaml`.** + +If it exists, read it directly — it is the authoritative source of truth. +Extract status for each story from the `status` field. No markdown scanning needed. +Use its `sprint`, `goal`, `start`, `end` fields instead of re-parsing the sprint plan. + +**If `sprint-status.yaml` does not exist** (legacy sprint or first-time setup), +fall back to markdown scanning: + +1. If the entry references a story file path, check if the file exists. + Read the file and scan for status markers: DONE, COMPLETE, IN PROGRESS, + BLOCKED, NOT STARTED (case-insensitive). +2. If the entry has no file path (inline task in the sprint plan), scan the + sprint plan itself for status markers next to that entry. +3. If no status marker is found, classify as NOT STARTED. +4. If a file is referenced but does not exist, classify as MISSING and note it. + +When using the fallback, add a note at the bottom of the output: +"⚠ No `sprint-status.yaml` found — status inferred from markdown. Run `/sprint-plan update` to generate one." 
+ +Optionally (fast check only — do not do a deep scan): grep `src/` for a +directory or file name that matches the story's system slug to check for +implementation evidence. This is a hint only, not a definitive status. + +### Stale Story Detection + +After collecting status for all stories, check each IN PROGRESS story for staleness: + +- For each story that has a referenced file, read the file and look for a + `Last Updated:` field in the frontmatter or header (e.g., `Last Updated: 2026-04-01` + or `updated: 2026-04-01`). Accept any reasonable date field name: `Last Updated`, + `Updated`, `last-updated`, `updated_at`. +- Calculate days since that date using today's date. +- If the date is more than 2 days ago, flag the story as **STALE**. +- If no date field is found in the story file, note "no timestamp — cannot check staleness." +- If the story has no referenced file (inline task), note "inline task — cannot check staleness." + +STALE stories are included in the output table and collected into an "Attention Needed" +section (see Phase 5 output format). + +**Stale story escalation**: If any IN PROGRESS story is flagged STALE, the burndown verdict +is upgraded to at least **At Risk** — even if the completion percentage is within the normal +On Track window. Record this escalation reason: "At Risk — [N] story(ies) with no progress in +[N] days." + +--- + +## 4. 
Burndown Assessment + +Calculate: +- Tasks complete (DONE or COMPLETE) +- Tasks in progress (IN PROGRESS) +- Tasks blocked (BLOCKED) +- Tasks not started (NOT STARTED or MISSING) +- Completion percentage: (complete / total) * 100 + +Assess burndown by comparing completion percentage to time consumed percentage: + +- **On Track**: completion % is within 10 points of time consumed % or ahead +- **At Risk**: completion % is 10-25 points behind time consumed % +- **Behind**: completion % is more than 25 points behind time consumed % + +If dates are unavailable, skip the burndown assessment and report "On Track / +At Risk / Behind: unknown — sprint dates not found." + +--- + +## 5. Output + +Keep the total output to 30 lines or fewer. Use this format: + +```markdown +## Sprint [N] Status — [Today's Date] +**Sprint Goal**: [from sprint plan] +**Days Remaining**: [N] of [total] ([% time consumed]) + +### Progress: [complete/total] tasks ([%]) + +| Story / Task | Priority | Status | Owner | Blocker | +|----------------------|------------|-------------|---------|----------------| +| [title] | Must Have | DONE | [owner] | | +| [title] | Must Have | IN PROGRESS | [owner] | | +| [title] | Must Have | BLOCKED | [owner] | [brief reason] | +| [title] | Should Have| NOT STARTED | [owner] | | + +### Attention Needed +| Story / Task | Status | Last Updated | Days Stale | Note | +|----------------------|-------------|----------------|------------|----------------| +| [title] | IN PROGRESS | [date or N/A] | [N days] | [STALE / no timestamp — cannot check staleness / inline task — cannot check staleness] | + +*(Omit this section entirely if no IN PROGRESS stories are stale or have timestamp concerns.)* + +### Burndown: [On Track / At Risk / Behind] +[1-2 sentences. If behind: which Must Haves are at risk. If on track: confirm +and note any Should Haves the team could pull.] 
+ +### Must-Haves at Risk +[List any Must Have stories that are BLOCKED or NOT STARTED with less than +40% of sprint time remaining. If none, write "None."] + +### Emerging Risks +[Any risks visible from the story scan: missing files, cascading blockers, +stories with no owner. If none, write "None identified."] + +### Recommendation +[One concrete action, or "Sprint is on track — no action needed."] +``` + +--- + +## 6. Fast Escalation Rules + +Apply these rules before outputting, and place the flag at the TOP of the +output if triggered (above the status table): + +**Critical flag** — if Must Have stories are BLOCKED or NOT STARTED and +less than 40% of the sprint time remains: + +``` +SPRINT AT RISK: [N] Must Have stories are not complete with [X]% of sprint +time remaining. Recommend replanning with `/sprint-plan update`. +``` + +**Completion flag** — if all Must Have stories are DONE: + +``` +All Must Haves complete. Team can pull from Should Have backlog. +``` + +**Missing stories flag** — if any referenced story files do not exist: + +``` +NOTE: [N] story files referenced in the sprint plan are missing. +Run `/story-readiness sprint` to validate story file coverage. +``` + +--- + +## Collaborative Protocol + +This skill is read-only. It reports observed facts from files on disk. + +- It does not update the sprint plan +- It does not change story status +- It does not propose scope cuts (that is `/sprint-plan update`) +- It makes at most one recommendation per run + +For more detail on a specific story, the user can read the story file directly +or run `/story-readiness [path]`. + +For sprint replanning, use `/sprint-plan update`. +For end-of-sprint retrospective, use `/milestone-review`. 
diff --git a/.omc/skills/start/SKILL.md b/.omc/skills/start/SKILL.md new file mode 100644 index 0000000..9e4ce7e --- /dev/null +++ b/.omc/skills/start/SKILL.md @@ -0,0 +1,225 @@ +--- +name: start +description: "First-time onboarding — asks where you are, then guides you to the right workflow. No assumptions." +argument-hint: "[no arguments]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, AskUserQuestion +--- + +# Guided Onboarding + +This skill writes one file: `production/review-mode.txt` (review mode config set in Phase 3b). + +This skill is the entry point for new users. It does NOT assume you have a game idea, an engine preference, or any prior experience. It asks first, then routes you to the right workflow. + +--- + +## Phase 1: Detect Project State + +Before asking anything, silently gather context so you can tailor your guidance. Do NOT show these results unprompted — they inform your recommendations, not the conversation opener. + +Check: +- **Engine configured?** Read `.claude/docs/technical-preferences.md`. If the Engine field contains `[TO BE CONFIGURED]`, the engine is not set. +- **Game concept exists?** Check for `design/gdd/game-concept.md`. +- **Source code exists?** Glob for source files in `src/` (`*.gd`, `*.cs`, `*.cpp`, `*.h`, `*.rs`, `*.py`, `*.js`, `*.ts`). +- **Prototypes exist?** Check for subdirectories in `prototypes/`. +- **Design docs exist?** Count markdown files in `design/gdd/`. +- **Production artifacts?** Check for files in `production/sprints/` or `production/milestones/`. + +Store these findings internally to validate the user's self-assessment and tailor recommendations. + +--- + +## Phase 2: Ask Where the User Is + +This is the first thing the user sees. Use `AskUserQuestion` with these exact options so the user can click rather than type: + +- **Prompt**: "Welcome to Claude Code Game Studios! Before I suggest anything, I'd like to understand where you're starting from. 
Where are you at with your game idea right now?" +- **Options**: + - `A) No idea yet` — I don't have a game concept at all. I want to explore and figure out what to make. + - `B) Vague idea` — I have a rough theme, feeling, or genre in mind (e.g., "something with space" or "a cozy farming game") but nothing concrete. + - `C) Clear concept` — I know the core idea — genre, basic mechanics, maybe a pitch sentence — but haven't formalized it into documents yet. + - `D) Existing work` — I already have design docs, prototypes, code, or significant planning done. I want to organize or continue the work. + +Wait for the user's selection. Do not proceed until they respond. + +--- + +## Phase 3: Route Based on Answer + +#### If A: No idea yet + +The user needs creative exploration before anything else. + +1. Acknowledge that starting from zero is completely fine +2. Briefly explain what `/brainstorm` does (guided ideation using professional frameworks — MDA, player psychology, verb-first design). Mention that it has two modes: `/brainstorm open` for fully open exploration, or `/brainstorm [hint]` if they have even a vague theme (e.g., "space", "cozy", "horror"). +3. Recommend running `/brainstorm open` as the next step, but invite them to use a hint if something comes to mind +4. 
Show the recommended path: + **Concept phase:** + - `/brainstorm open` — discover your game concept + - `/setup-engine` — configure the engine (brainstorm will recommend one) + - `/art-bible` — define visual identity (uses the Visual Identity Anchor brainstorm produces) + - `/map-systems` — decompose the concept into systems + - `/design-system` — author a GDD for each MVP system + - `/review-all-gdds` — cross-system consistency check + - `/gate-check` — validate readiness before architecture work + **Architecture phase:** + - `/create-architecture` — produce the master architecture blueprint and Required ADR list + - `/architecture-decision (×N)` — record key technical decisions, following the Required ADR list + - `/create-control-manifest` — compile decisions into an actionable rules sheet + - `/architecture-review` — validate architecture coverage + **Pre-Production phase:** + - `/ux-design` — author UX specs for key screens (main menu, HUD, core interactions) + - `/prototype` — build a throwaway prototype to validate the core mechanic + - `/playtest-report (×1+)` — document each vertical slice playtest session + - `/create-epics` — map systems to epics + - `/create-stories` — break epics into implementable stories + - `/sprint-plan` — plan the first sprint + **Production phase:** → pick up stories with `/dev-story` + +#### If B: Vague idea + +1. Ask them to share their vague idea — even a few words is enough +2. Validate the idea as a starting point (don't judge or redirect) +3. Recommend running `/brainstorm [their hint]` to develop it +4. 
Show the recommended path: + **Concept phase:** + - `/brainstorm [hint]` — develop the idea into a full concept + - `/setup-engine` — configure the engine + - `/art-bible` — define visual identity (uses the Visual Identity Anchor brainstorm produces) + - `/map-systems` — decompose the concept into systems + - `/design-system` — author a GDD for each MVP system + - `/review-all-gdds` — cross-system consistency check + - `/gate-check` — validate readiness before architecture work + **Architecture phase:** + - `/create-architecture` — produce the master architecture blueprint and Required ADR list + - `/architecture-decision (×N)` — record key technical decisions, following the Required ADR list + - `/create-control-manifest` — compile decisions into an actionable rules sheet + - `/architecture-review` — validate architecture coverage + **Pre-Production phase:** + - `/ux-design` — author UX specs for key screens (main menu, HUD, core interactions) + - `/prototype` — build a throwaway prototype to validate the core mechanic + - `/playtest-report (×1+)` — document each vertical slice playtest session + - `/create-epics` — map systems to epics + - `/create-stories` — break epics into implementable stories + - `/sprint-plan` — plan the first sprint + **Production phase:** → pick up stories with `/dev-story` + +#### If C: Clear concept + +1. Ask them to describe their concept in one sentence — genre and core mechanic. Use plain text, not AskUserQuestion (it's an open response). +2. Acknowledge the concept, then use `AskUserQuestion` to offer two paths: + - **Prompt**: "How would you like to proceed?" + - **Options**: + - `Formalize it first` — Run `/brainstorm [concept]` to structure it into a proper game concept document + - `Jump straight in` — Go to `/setup-engine` now and write the GDD manually afterward +3. 
Show the recommended path: + **Concept phase:** + - `/brainstorm` or `/setup-engine` — (their pick from step 2) + - `/art-bible` — define visual identity (after brainstorm if run, or after concept doc exists) + - `/design-review` — validate the concept doc + - `/map-systems` — decompose the concept into individual systems + - `/design-system` — author a GDD for each MVP system + - `/review-all-gdds` — cross-system consistency check + - `/gate-check` — validate readiness before architecture work + **Architecture phase:** + - `/create-architecture` — produce the master architecture blueprint and Required ADR list + - `/architecture-decision (×N)` — record key technical decisions, following the Required ADR list + - `/create-control-manifest` — compile decisions into an actionable rules sheet + - `/architecture-review` — validate architecture coverage + **Pre-Production phase:** + - `/ux-design` — author UX specs for key screens (main menu, HUD, core interactions) + - `/prototype` — build a throwaway prototype to validate the core mechanic + - `/playtest-report (×1+)` — document each vertical slice playtest session + - `/create-epics` — map systems to epics + - `/create-stories` — break epics into implementable stories + - `/sprint-plan` — plan the first sprint + **Production phase:** → pick up stories with `/dev-story` + +#### If D: Existing work + +1. Share what you found in Phase 1: + - "I can see you have [X source files / Y design docs / Z prototypes]..." + - "Your engine is [configured as X / not yet configured]..." + +2. **Sub-case D1 — Early stage** (engine not configured or only a game concept exists): + - Recommend `/setup-engine` first if engine not configured + - Then `/project-stage-detect` for a gap inventory + + **Sub-case D2 — GDDs, ADRs, or stories already exist:** + - Explain: "Having files isn't the same as the template's skills being able to use them. GDDs might be missing required sections. `/adopt` checks this specifically." + - Recommend: + 1. 
`/project-stage-detect` — understand what phase and what's missing entirely + 2. `/adopt` — audit whether existing artifacts are in the right internal format + +3. Show the recommended path for D2: + - `/project-stage-detect` — phase detection + existence gaps + - `/adopt` — format compliance audit + migration plan + - `/setup-engine` — if engine not configured + - `/design-system retrofit [path]` — fill missing GDD sections + - `/architecture-decision retrofit [path]` — add missing ADR sections + - `/architecture-review` — bootstrap the TR requirement registry + - `/gate-check` — validate readiness for next phase + +--- + +## Phase 3b: Set Review Mode + +Check if `production/review-mode.txt` already exists. + +**If it exists**: Read it and show the current mode — "Review mode is set to `[current]`." — then proceed to Phase 4. Do not ask again. + +**If it does not exist**: Use `AskUserQuestion`: + +- **Prompt**: "One setup choice: how much design review would you want as you work through the workflow?" +- **Options**: + - `Full` — Director specialists review at each key workflow step. Best for teams, learning the workflow, or when you want thorough feedback on every decision. + - `Lean (recommended)` — Directors only at phase gate transitions (/gate-check). Skips per-skill reviews. Balanced approach for solo devs and small teams. + - `Solo` — No director reviews at all. Maximum speed. Best for game jams, prototypes, or if the reviews feel like overhead. + +Write the choice to `production/review-mode.txt` immediately after the user +selects — no separate "May I write?" needed, as the write is a direct +consequence of the selection: +- `Full` → write `full` +- `Lean (recommended)` → write `lean` +- `Solo` → write `solo` + +Create the `production/` directory if it does not exist. + +--- + +## Phase 4: Confirm Before Proceeding + +After presenting the recommended path, use `AskUserQuestion` to ask the user which step they'd like to take first. 
Never auto-run the next skill. + +- **Prompt**: "Would you like to start with [recommended first step]?" +- **Options**: + - `Yes, let's start with [recommended first step]` + - `I'd like to do something else first` + +--- + +## Phase 5: Hand Off + +When the user confirms their next step, respond with a single short line: "Type `[skill command]` to begin." Nothing else. Do not re-explain the skill or add encouragement. The `/start` skill's job is done. + +Verdict: **COMPLETE** — user oriented and handed off to next step. + +--- + +## Edge Cases + +- **User picks D but project is empty**: Gently redirect — "It looks like the project is a fresh template with no artifacts yet. Would Path A or B be a better fit?" +- **User picks A but project has code**: Mention what you found — "I noticed there's already code in `src/`. Did you mean to pick D (existing work)?" +- **User is returning (engine configured, concept exists)**: Skip onboarding entirely — "It looks like you're already set up! Your engine is [X] and you have a game concept at `design/gdd/game-concept.md`. Review mode: `[read from production/review-mode.txt, or 'lean (default)' if missing]`. Want to pick up where you left off? Try `/sprint-plan` or just tell me what you'd like to work on." +- **User doesn't fit any option**: Let them describe their situation in their own words and adapt. + +--- + +## Collaborative Protocol + +1. **Ask first** — never assume the user's state or intent +2. **Present options** — give clear paths, not mandates +3. **User decides** — they pick the direction +4. **No auto-execution** — recommend the next skill, don't run it without asking +5. **Adapt** — if the user's situation doesn't fit a template, listen and adjust diff --git a/.omc/skills/story-done/SKILL.md b/.omc/skills/story-done/SKILL.md new file mode 100644 index 0000000..b067f1d --- /dev/null +++ b/.omc/skills/story-done/SKILL.md @@ -0,0 +1,428 @@ +--- +name: story-done +description: "End-of-story completion review. 
Reads the story file, verifies each acceptance criterion against the implementation, checks for GDD/ADR deviations, prompts code review, updates story status to Complete, and surfaces the next ready story from the sprint." +argument-hint: "[story-file-path] [--review full|lean|solo]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write, Edit, AskUserQuestion, Task +--- + +# Story Done + +This skill closes the loop between design and implementation. Run it at the end +of implementing any story. It ensures every acceptance criterion is verified +before the story is marked done, GDD and ADR deviations are explicitly +documented rather than silently introduced, code review is prompted rather than +forgotten, and the story file reflects actual completion status. + +**Output:** Updated story file (Status: Complete) + surfaced next story. + +--- + +## Phase 1: Find the Story + +Resolve the review mode (once, store for all gate spawns this run): +1. If `--review [full|lean|solo]` was passed → use that +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern. + +**If a file path is provided** (e.g., `/story-done production/epics/core/story-damage-calculator.md`): +read that file directly. + +**If no argument is provided:** + +1. Check `production/session-state/active.md` for the currently active story. +2. If not found there, read the most recent file in `production/sprints/` and + look for stories marked IN PROGRESS. +3. If multiple in-progress stories are found, use `AskUserQuestion`: + - "Which story are we completing?" + - Options: list the in-progress story file names. +4. If no story can be found, ask the user to provide the path. + +--- + +## Phase 2: Read the Story + +Read the full story file. 
Extract and hold in context: + +- **Story name and ID** +- **GDD Requirement TR-ID(s)** referenced (e.g., `TR-combat-001`) +- **Manifest Version** embedded in the story header (e.g., `2026-03-10`) +- **ADR reference(s)** referenced +- **Acceptance Criteria** — the complete list (every checkbox item) +- **Implementation files** — files listed under "files to create/modify" +- **Story Type** — the `Type:` field from the story header (Logic / Integration / Visual/Feel / UI / Config/Data) +- **Engine notes** — any engine-specific constraints noted +- **Definition of Done** — if present, the story-level DoD +- **Estimated vs actual scope** — if an estimate was noted + +Also read: +- `docs/architecture/tr-registry.yaml` — look up each TR-ID in the story. + Read the *current* `requirement` text from the registry entry. This is the + source of truth for what the GDD required — do not use any requirement text + that may be quoted inline in the story (it may be stale). +- The referenced GDD section — just the acceptance criteria and key rules, not + the full document. Use this to cross-check the registry text is still accurate. +- The referenced ADR(s) — just the Decision and Consequences sections +- `docs/architecture/control-manifest.md` header — extract the current + `Manifest Version:` date (used in Phase 4 staleness check) + +--- + +## Phase 3: Verify Acceptance Criteria + +For each acceptance criterion in the story, attempt verification using one of +three methods: + +### Automatic verification (run without asking) + +- **File existence check**: `Glob` for files the story said would be created. +- **Test pass check**: if a test file path is mentioned, run it via `Bash`. +- **No hardcoded values check**: `Grep` for numeric literals in gameplay code + paths that should be in config files. +- **No hardcoded strings check**: `Grep` for player-facing strings in `src/` + that should be in localization files. 
+- **Dependency check**: if a criterion says "depends on X", check that X exists. + +### Manual verification with confirmation (use `AskUserQuestion`) + +- Criteria about subjective qualities ("feels responsive", "animations play correctly") +- Criteria about gameplay behaviour ("player takes damage when...", "enemy responds to...") +- Performance criteria ("completes within Xms") — ask if profiled or accept as assumed + +Batch up to 4 manual verification questions into a single `AskUserQuestion` call: + +``` +question: "Does [criterion]?" +options: "Yes — passes", "No — fails", "Not tested yet" +``` + +### Unverifiable (flag without blocking) + +- Criteria that require a full game build to test (end-to-end gameplay scenarios) +- Mark as: `DEFERRED — requires playtest session` + +### Test-Criterion Traceability + +After completing the pass/fail/deferred check above, map each acceptance +criterion to the test that covers it: + +For each acceptance criterion in the story: + +1. Ask: is there a test — unit, integration, or confirmed manual playtest — that + directly verifies this criterion? + - **Unit test**: check `tests/unit/` for a test file or function name that + matches the criterion's subject (use `Glob` and `Grep`) + - **Integration test**: check `tests/integration/` similarly + - **Manual confirmation**: if the criterion was verified via `AskUserQuestion` + above with a "Yes — passes" answer, count that as a manual test + +2. Produce a traceability table: + +``` +| Criterion | Test | Status | +|-----------|------|--------| +| AC-1: [criterion text] | tests/unit/test_foo.gd::test_bar | COVERED | +| AC-2: [criterion text] | Manual playtest confirmation | COVERED | +| AC-3: [criterion text] | — | UNTESTED | +``` + +3. Apply these escalation rules: + + - If **>50% of criteria are UNTESTED**: escalate to **BLOCKING** — test + coverage is insufficient to confirm the story is actually done. The verdict + in Phase 6 cannot be COMPLETE until coverage improves. 
+ - If **some (≤50%) criteria are UNTESTED**: remain ADVISORY — does not block + completion, but must appear in Completion Notes. + - If **all criteria are COVERED**: no action needed beyond including the + table in the report. + +4. For any ADVISORY untested criteria, add to the Completion Notes in Phase 7: + `"Untested criteria: [AC-N list]. Recommend adding tests in a follow-up story."` + +### Test Evidence Requirement + +Based on the Story Type extracted in Phase 2, check for required evidence: + +| Story Type | Required Evidence | Gate Level | +|---|---|---| +| **Logic** | Automated unit test in `tests/unit/[system]/` — must exist and pass | BLOCKING | +| **Integration** | Integration test in `tests/integration/[system]/` OR playtest doc | BLOCKING | +| **Visual/Feel** | Screenshot + sign-off in `production/qa/evidence/` | ADVISORY | +| **UI** | Manual walkthrough doc OR interaction test in `production/qa/evidence/` | ADVISORY | +| **Config/Data** | Smoke check pass report in `production/qa/smoke-*.md` | ADVISORY | + +**For Logic stories**: first read the story's **Test Evidence** section to extract the +exact required file path. Use `Glob` to check that exact path. If the exact path is not +found, also search `tests/unit/[system]/` broadly (the file may have been placed at a +slightly different location). If no test file is found at either location: +- Flag as **BLOCKING**: "Logic story has no unit test file. Story requires it at + `[exact-path-from-Test-Evidence-section]`. Create and run the test before marking + this story Complete." + +**For Integration stories**: read the story's **Test Evidence** section for the exact +required path. Use `Glob` to check that exact path first, then search +`tests/integration/[system]/` broadly, then check `production/session-logs/` for a +playtest record referencing this story. +If none found: flag as **BLOCKING** (same rule as Logic). 
+ +**For Visual/Feel and UI stories**: glob `production/qa/evidence/` for a file +referencing this story. If none: flag as **ADVISORY** — +"No manual test evidence found. Create `production/qa/evidence/[story-slug]-evidence.md` +using the test-evidence template and obtain sign-off before final closure." + +**For Config/Data stories**: check for any `production/qa/smoke-*.md` file. +If none: flag as **ADVISORY** — "No smoke check report found. Run `/smoke-check`." + +**If no Story Type is set**: flag as **ADVISORY** — +"Story Type not declared. Add `Type: [Logic|Integration|Visual/Feel|UI|Config/Data]` +to the story header to enable test evidence gate enforcement in future stories." + +Any BLOCKING test evidence gap prevents the COMPLETE verdict in Phase 6. + +--- + +## Phase 4: Check for Deviations + +Compare the implementation against the design documents. + +Run these checks automatically: + +1. **GDD rules check**: Using the current requirement text from `tr-registry.yaml` + (looked up by the story's TR-ID), check that the implementation reflects what + the GDD actually requires now — not what it required when the story was written. + `Grep` the implemented files for key function names, data structures, or class + names mentioned in the current GDD section. + +2. **Manifest version staleness check**: Compare the `Manifest Version:` date + embedded in the story header against the `Manifest Version:` date in the + current `docs/architecture/control-manifest.md` header. + - If they match → pass silently. + - If the story's version is older → flag as ADVISORY: + `ADVISORY: Story was written against manifest v[story-date]; current manifest + is v[current-date]. New rules may apply. Run /story-readiness to check.` + - If control-manifest.md does not exist → skip this check. + +3. **ADR constraints check**: Read the referenced ADR's Decision section. Check + for forbidden patterns from `docs/architecture/control-manifest.md` (if it + exists). 
`Grep` for patterns explicitly forbidden in the ADR. + +4. **Hardcoded values check**: `Grep` the implemented files for numeric literals + in gameplay logic that should be in data files. + +5. **Scope check**: Did the implementation touch files outside the story's stated + scope? (files not listed in "files to create/modify") + +For each deviation found, categorize: + +- **BLOCKING** — implementation contradicts the GDD or ADR (must fix before + marking complete) +- **ADVISORY** — implementation drifts slightly from spec but is functionally + equivalent (document, user decides) +- **OUT OF SCOPE** — additional files were touched beyond the story's stated + boundary (flag for awareness — may be valid or scope creep) + +--- + +## Phase 4b: QA Coverage Gate + +**Review mode check** — apply before spawning QL-TEST-COVERAGE: +- `solo` → skip. Note: "QL-TEST-COVERAGE skipped — Solo mode." Proceed to Phase 5. +- `lean` → skip (not a PHASE-GATE). Note: "QL-TEST-COVERAGE skipped — Lean mode." Proceed to Phase 5. +- `full` → spawn as normal. + +After completing the deviation checks in Phase 4, spawn `qa-lead` via Task using gate **QL-TEST-COVERAGE** (`.claude/docs/director-gates.md`). + +Pass: +- The story file path and story type +- Test file paths found during Phase 3 (exact paths, or "none found") +- The story's `## QA Test Cases` section (the pre-written test specs from story creation) +- The story's `## Acceptance Criteria` list + +The qa-lead reviews whether the tests actually cover what was specified — not just whether files exist. + +Apply the verdict: +- **ADEQUATE** → proceed to Phase 5 +- **GAPS** → flag as **ADVISORY**: "QA lead identified coverage gaps: [list]. Story can complete but gaps should be addressed in a follow-up story." +- **INADEQUATE** → flag as **BLOCKING**: "QA lead: critical logic is untested. Verdict cannot be COMPLETE until coverage improves. Specific gaps: [list]." + +Skip this phase for Config/Data stories (no code tests required). 
+ +--- + +## Phase 5: Lead Programmer Code Review Gate + +**Review mode check** — apply before spawning LP-CODE-REVIEW: +- `solo` → skip. Note: "LP-CODE-REVIEW skipped — Solo mode." Proceed to Phase 6 (completion report). +- `lean` → skip (not a PHASE-GATE). Note: "LP-CODE-REVIEW skipped — Lean mode." Proceed to Phase 6 (completion report). +- `full` → spawn as normal. + +Spawn `lead-programmer` via Task using gate **LP-CODE-REVIEW** (`.claude/docs/director-gates.md`). + +Pass: implementation file paths, story file path, relevant GDD section, governing ADR. + +Present the verdict to the user. If CONCERNS, surface them via `AskUserQuestion`: +- Options: `Revise flagged issues` / `Accept and proceed` / `Discuss further` +If REJECT, do not proceed to Phase 6 verdict until the issues are resolved. + +If the story has no implementation files yet (verdict is being run before coding is done), skip this phase and note: "LP-CODE-REVIEW skipped — no implementation files found. Run after implementation is complete." + +--- + +## Phase 6: Present the Completion Report + +Before updating any files, present the full report: + +```markdown +## Story Done: [Story Name] +**Story**: [file path] +**Date**: [today] + +### Acceptance Criteria: [X/Y passing] +- [x] [Criterion 1] — auto-verified (test passes) +- [x] [Criterion 2] — confirmed +- [ ] [Criterion 3] — FAILS: [reason] +- [?] 
[Criterion 4] — DEFERRED: requires playtest + +### Test-Criterion Traceability +| Criterion | Test | Status | +|-----------|------|--------| +| AC-1: [text] | [test file::test name] | COVERED | +| AC-2: [text] | Manual confirmation | COVERED | +| AC-3: [text] | — | UNTESTED | + +### Test Evidence +**Story Type**: [Logic | Integration | Visual/Feel | UI | Config/Data | Not declared] +**Required evidence**: [unit test file | integration test or playtest | screenshot + sign-off | walkthrough doc | smoke check pass] +**Evidence found**: [YES — `[path]` | NO — BLOCKING | NO — ADVISORY] + +### Deviations +[NONE] OR: +- BLOCKING: [description] — [GDD/ADR reference] +- ADVISORY: [description] — user accepted / flagged for tech debt + +### Scope +[All changes within stated scope] OR: +- Extra files touched: [list] — [note whether valid or scope creep] + +### Verdict: COMPLETE / COMPLETE WITH NOTES / BLOCKED +``` + +**Verdict definitions:** +- **COMPLETE**: all criteria pass, no blocking deviations +- **COMPLETE WITH NOTES**: all criteria pass, advisory deviations documented +- **BLOCKED**: failing criteria or blocking deviations must be resolved first + +If the verdict is **BLOCKED**: do not proceed to Phase 7. List what must be +fixed. Offer to help fix the blocking items. + +--- + +## Phase 7: Update Story Status + +Ask before writing: "May I update the story file to mark it Complete and log +the completion notes?" + +If yes, edit the story file: + +1. Update the status field: `Status: Complete` +2. Add a `## Completion Notes` section at the bottom: + +```markdown +## Completion Notes +**Completed**: [date] +**Criteria**: [X/Y passing] ([any deferred items listed]) +**Deviations**: [None] or [list of advisory deviations] +**Test Evidence**: [Logic: test file at path | Visual/Feel: evidence doc at path | None required (Config/Data)] +**Code Review**: [Pending / Complete / Skipped] +``` + +3. 
If advisory deviations exist, ask: "Should I log these as tech debt in + `docs/tech-debt-register.md`?" + +4. **Update `production/sprint-status.yaml`** (if it exists): + - Find the entry matching this story's file path or ID + - Set `status: done` and `completed: [today's date]` + - Update the top-level `updated` field + - This is a silent update — no extra approval needed (already approved in the step above) + +### Session State Update + +After updating the story file, silently append to +`production/session-state/active.md`: + + ## Session Extract — /story-done [date] + - Verdict: [COMPLETE / COMPLETE WITH NOTES / BLOCKED] + - Story: [story file path] — [story title] + - Tech debt logged: [N items, or "None"] + - Next recommended: [next ready story title and path, or "None identified"] + +If `active.md` does not exist, create it with this block as the initial content. +Confirm in conversation: "Session state updated." + +--- + +## Phase 8: Surface the Next Story + +After completion, help the developer keep momentum: + +1. Read the current sprint plan from `production/sprints/`. +2. Find stories that are: + - Status: READY or NOT STARTED + - Not blocked by other incomplete stories + - In the Must Have or Should Have tier + +Present: + +``` +### Next Up +The following stories are ready to pick up: +1. [Story name] — [1-line description] — Est: [X hrs] +2. [Story name] — [1-line description] — Est: [X hrs] + +Run `/story-readiness [path]` to confirm a story is implementation-ready +before starting. +``` + +If no more Must Have stories remain in this sprint (all are Complete or Blocked): + +``` +### Sprint Close-Out Sequence + +All Must Have stories are complete. QA sign-off is required before advancing. +Run these in order: + +1. `/smoke-check sprint` — verify the critical path still works end-to-end +2. `/team-qa sprint` — full QA cycle: test case execution, bug triage, sign-off report +3. 
`/gate-check` — advance to the next phase once QA approves + +Do not run `/gate-check` until `/team-qa` returns APPROVED or APPROVED WITH CONDITIONS. +``` + +If there are Should Have stories still unstarted, surface them alongside the close-out sequence so the user can choose: close the sprint now, or pull in more work first. + +If no more stories are ready but Must Have stories are still In Progress (not Complete): +"No more stories ready to start — [N] Must Have stories still in progress. Continue implementing those before sprint close-out." + +--- + +## Collaborative Protocol + +- **Never mark a story complete without user approval** — Phase 7 requires an + explicit "yes" before any file is edited. +- **Never auto-fix failing criteria** — report them and ask what to do. +- **Deviations are facts, not judgments** — present them neutrally; the user + decides if they are acceptable. +- **BLOCKED verdict is advisory** — the user can override and mark complete + anyway; document the risk explicitly if they do. +- Use `AskUserQuestion` for the code review prompt and for batching manual + criteria confirmations. + +--- + +## Recommended Next Steps + +- Run `/story-readiness [next-story-path]` to validate the next story before starting implementation +- If all Must Have stories are complete: run `/smoke-check sprint` → `/team-qa sprint` → `/gate-check` +- If tech debt was logged: track it via `/tech-debt` to keep the register current diff --git a/.omc/skills/story-readiness/SKILL.md b/.omc/skills/story-readiness/SKILL.md new file mode 100644 index 0000000..5390f68 --- /dev/null +++ b/.omc/skills/story-readiness/SKILL.md @@ -0,0 +1,348 @@ +--- +name: story-readiness +description: "Validate that a story file is implementation-ready. Checks for embedded GDD requirements, ADR references, engine notes, clear acceptance criteria, and no open design questions. Produces READY / NEEDS WORK / BLOCKED verdict with specific gaps. 
Use when user says 'is this story ready', 'can I start on this story', 'is story X ready to implement'." +argument-hint: "[story-file-path or 'all' or 'sprint']" +user-invocable: true +allowed-tools: Read, Glob, Grep, AskUserQuestion, Task +model: haiku +--- + +# Story Readiness + +This skill validates that a story file contains everything a developer needs +to begin implementation — no mid-sprint design interruptions, no guessing, +no ambiguous acceptance criteria. Run it before assigning a story. + +**This skill is read-only.** It never edits story files. It reports findings +and asks whether the user wants help filling gaps. + +**Output:** Verdict per story (READY / NEEDS WORK / BLOCKED) with a specific +gap list for each non-ready story. + +--- + +## Phase 0: Resolve Review Mode + +Resolve the review mode once at startup (store for all gate spawns this run): + +1. If skill was called with `--review [full|lean|solo]` → use that value +2. Else read `production/review-mode.txt` → use that value +3. Else → default to `lean` + +See `.claude/docs/director-gates.md` for the full check pattern and mode definitions. + +--- + +## 1. Parse Arguments + +**Scope:** `$ARGUMENTS[0]` (blank = ask user via AskUserQuestion) + +- **Specific path** (e.g., `/story-readiness production/epics/combat/story-001-basic-attack.md`): + validate that single story file. +- **`sprint`**: read the current sprint plan from `production/sprints/` (most + recent file), extract every story path it references, validate each one. +- **`all`**: glob `production/epics/**/*.md`, exclude `EPIC.md` index files, + validate every story file found. +- **No argument**: ask the user which scope to validate. + +If no argument is given, use `AskUserQuestion`: +- "What would you like to validate?" + - Options: "A specific story file", "All stories in the current sprint", + "All stories in production/epics/", "Stories for a specific epic" + +Report the scope before proceeding: "Validating [N] story files." 
+ +--- + +## 2. Load Supporting Context + +Before checking any stories, load reference documents once (not per-story): + +- `design/gdd/systems-index.md` — to know which systems have approved GDDs +- `docs/architecture/control-manifest.md` — to know which manifest rules exist + (if the file does not exist, note it as missing once; do not re-flag per story) + Also extract the `Manifest Version:` date from the header block if the file exists. +- `docs/architecture/tr-registry.yaml` — index all entries by `id`. Used to + validate TR-IDs in stories. If the file does not exist, note it once; TR-ID + checks will auto-pass for all stories (registry predates stories, so missing + registry means stories are from before TR tracking was introduced). +- All ADR status fields — for each unique ADR referenced across the stories being + checked, read the ADR file and note its `Status:` field. Cache these so you + don't re-read the same ADR for every story. +- The current sprint file (if scope is `sprint`) — to identify Must Have / + Should Have priority for escalation decisions + +--- + +## 3. Story Readiness Checklist + +For each story file, evaluate every item below. A story is READY only if all +items pass or are explicitly marked N/A with a stated reason. + +### Design Completeness + +- [ ] **GDD requirement referenced**: The story includes a `design/gdd/` path + and quotes or links a specific requirement, acceptance criterion, or rule from + that GDD — not just the GDD filename. A link to the document without tracing + to a specific requirement does not pass. +- [ ] **Requirement is self-contained**: The acceptance criteria in the story + are understandable without opening the GDD. A developer should not need to + read a separate document to understand what DONE means. +- [ ] **Acceptance criteria are testable**: Each criterion is a specific, + observable condition — not "implement X" or "the system works correctly". + Bad example: "Implement the jump mechanic." 
Good example: "Jump reaches + max height of 5 units within 0.3 seconds when jump is held." +- [ ] **No acceptance criteria require judgment calls**: Criteria like + "feels responsive" or "looks good" are not testable without a defined + benchmark. These must be replaced with specific observable conditions or + playtest protocols. + +### Architecture Completeness + +- [ ] **ADR referenced or N/A stated**: The story references at least one ADR, + OR explicitly states "No ADR applies" with a brief reason. + A story with no ADR reference and no explicit N/A note fails this check. +- [ ] **ADR is Accepted (not Proposed)**: For each referenced ADR, check its + `Status:` field using the cached ADR statuses loaded in Section 2. + - If `Status: Accepted` → pass. + - If `Status: Proposed` → **BLOCKED**: the ADR may change before it is accepted, + and the story's implementation guidance could be wrong. + Fix: `BLOCKED: ADR-NNNN is Proposed — wait for acceptance before implementing.` + - If the ADR file does not exist → **BLOCKED**: referenced ADR is missing. + - Auto-pass if story has an explicit "No ADR applies" N/A note. +- [ ] **TR-ID is valid and active**: If the story contains a `TR-[system]-NNN` + reference, look it up in the TR registry loaded in Section 2. + - If the ID exists and `status: active` → pass. + - If the ID exists and `status: deprecated` or `status: superseded-by: ...` → + NEEDS WORK: the requirement was removed or replaced. + Fix: update the story to reference the current requirement ID or remove if no longer applicable. + - If the ID does not exist in the registry → NEEDS WORK: ID was not registered + (story may predate registry, or registry needs an `/architecture-review` run). + - Auto-pass if the story has no TR-ID reference OR if the registry does not exist. 
+- [ ] **Manifest version is current**: If the story has a `Manifest Version:` date + in its header AND `docs/architecture/control-manifest.md` exists: + - If story version matches current manifest `Manifest Version:` → pass. + - If story version is older than current manifest → NEEDS WORK: new rules may + apply. Fix: review changed manifest rules, update story if any forbidden/required + entries changed, then update the story's `Manifest Version:` to current. + - Auto-pass if either the story has no `Manifest Version:` field OR the manifest + does not exist. +- [ ] **Engine notes present**: For any post-cutoff engine API this story + is likely to touch, implementation notes or a verification requirement are + included. If the story clearly does not touch engine APIs (e.g., it is a + pure data/config change), "N/A — no engine API involved" is acceptable. +- [ ] **Control manifest rules noted**: Relevant layer rules from the control + manifest are referenced, OR "N/A — manifest not yet created" is stated. + This item auto-passes if `docs/architecture/control-manifest.md` does not + exist yet (do not penalize stories written before the manifest was created). + +### Scope Clarity + +- [ ] **Estimate present**: The story includes a size estimate (hours, + points, or a t-shirt size). A story with no estimate cannot be planned. +- [ ] **In-scope / Out-of-scope boundary stated**: The story states what + it does NOT include, either in an explicit Out of Scope section or in + language that makes the boundary unambiguous. Without this, scope creep + during implementation is likely. +- [ ] **Story dependencies listed**: If this story depends on other stories + being DONE first, those story IDs are listed. If there are no dependencies, + "None" is explicitly stated (not just omitted). 
+ +### Open Questions + +- [ ] **No unresolved design questions**: The story does not contain text + flagged as "UNRESOLVED", "TBD", "TODO", "?", or equivalent markers in + any acceptance criterion, implementation note, or rule statement. +- [ ] **Dependency stories are not in DRAFT**: For each story listed as a + dependency, check if the file exists and does not have a DRAFT status. A + story that depends on a DRAFT or missing story is BLOCKED, not just + NEEDS WORK. + +### Asset References Check + +- [ ] **Referenced assets exist**: Scan the story text for asset path patterns + (paths containing `assets/`, or file extensions `.png`, `.jpg`, `.svg`, + `.wav`, `.ogg`, `.mp3`, `.glb`, `.gltf`, `.tres`, `.tscn`, `.res`). + - For each asset path found: use Glob to check whether the file exists. + - If any referenced asset does not exist: **NEEDS WORK** — note the missing + path(s). (The story references assets that have not been created yet. + Either remove the reference, create a placeholder, or mark it as an + explicit dependency on an asset creation story.) + - If all referenced assets exist: note "Referenced assets verified: + [count] found." + - If no asset paths are referenced in the story: note "No asset references + found in story — skipping asset check." This item auto-passes. + - This is an existence-only check. Do not validate file format or content. + +### Definition of Done + +- [ ] **At least 3 testable acceptance criteria**: Fewer than 3 suggests + the story is either trivially small (should it be a story?) or under-specified. +- [ ] **Performance budget noted if applicable**: If this story touches any + part of the gameplay loop, rendering, or physics, a performance budget or + a "no performance impact expected — [reason]" note is present. +- [ ] **Story Type declared**: The story includes a `Type:` field in its header + identifying the test category (Logic / Integration / Visual/Feel / UI / Config/Data). 
+ Without this, test evidence requirements cannot be enforced at story close. + Fix: Add `Type: [Logic|Integration|Visual/Feel|UI|Config/Data]` to the story header. +- [ ] **Test evidence requirement is clear**: If the Story Type is set, the story + includes a `## Test Evidence` section stating where evidence will be stored + (test file path for Logic/Integration, or evidence doc path for Visual/Feel/UI). + Fix: Add `## Test Evidence` with the expected evidence location for the story's type. + +--- + +## 4. Verdict Assignment + +Assign one of three verdicts per story: + +**READY** — All checklist items pass or have explicit N/A justifications. +The story can be assigned immediately. + +**NEEDS WORK** — One or more checklist items fail, but all dependency stories +exist and are not DRAFT. The story can be fixed before assignment. + +**BLOCKED** — One or more dependency stories are missing or in DRAFT state, +OR a critical design question (flagged UNRESOLVED in a criterion or rule) has +no owner. The story cannot be assigned until the blocker is resolved. Note: +a story that is BLOCKED may also have NEEDS WORK items — list both. + +--- + +## 5. 
Output Format + +### Single story output + +``` +## Story Readiness: [story title] +File: [path] +Verdict: [READY / NEEDS WORK / BLOCKED] + +### Passing Checks (N/[total]) +[list passing items briefly] + +### Gaps +- [Checklist item]: [exact description of what is missing or wrong] + Fix: [specific text needed to resolve this gap] + +### Blockers (if BLOCKED) +- [What is blocking]: [story ID or design question that must resolve first] +``` + +### Multiple story aggregate output + +``` +## Story Readiness Summary — [scope] — [date] + +Ready: [N] stories +Needs Work: [N] stories +Blocked: [N] stories + +### Ready Stories +- [story title] ([path]) + +### Needs Work +- [story title]: [primary gap — one line] +- [story title]: [primary gap — one line] + +### Blocked Stories +- [story title]: Blocked by [story ID / design question] + +--- +[Full detail for each non-ready story follows, using the single-story format] +``` + +### Sprint escalation + +If the scope is `sprint` and any Must Have stories are NEEDS WORK or BLOCKED, +add a prominent warning at the top of the output: + +``` +WARNING: [N] Must Have stories are not implementation-ready. +[List them with their primary gap or blocker.] +Resolve these before the sprint begins or replan with `/sprint-plan update`. +``` + +--- + +## 6. Collaborative Protocol + +This skill is read-only. It never proposes edits or asks to write files. + +After reporting findings, offer: + +"Would you like help filling in the gaps for any of these stories? I can +draft the missing sections for your approval." + +If the user says yes for a specific story, draft only the missing sections +in conversation. Do not use Write or Edit tools — the user (or +`/create-stories`) handles writing. + +**Redirect rules:** +- If a story file does not exist at all: "This story file is missing entirely. + Run `/create-epics [layer]` then `/create-stories [epic-slug]` to generate stories from the GDD and ADR." 
+- If a story has no GDD reference and the work appears small: "This story has + no GDD reference. If the change is small (under ~4 hours), run + `/quick-design [description]` to create a Quick Design Spec, then reference + that spec in the story." +- If a story's scope has grown beyond its original sizing: "This story appears + to have expanded in scope. Consider splitting it or escalating to the producer + before implementation begins." + +--- + +## 7. Next-Story Handoff + +After completing a single-story readiness check (not `all` or `sprint` scope): + +1. Read the current sprint file from `production/sprints/` (most recent). +2. Find stories that are: + - Status: READY or NOT STARTED + - Not the story just checked + - Not blocked by incomplete dependencies + - In the Must Have or Should Have tier + +If any are found, surface up to 3: + +``` +### Other Ready Stories in This Sprint + +1. [Story name] — [1-line description] — Est: [X hrs] +2. [Story name] — [1-line description] — Est: [X hrs] + +Run `/story-readiness [path]` to validate before starting. +``` + +If no sprint file exists or no other ready stories are found, skip this section silently. + +--- + +## 8. Director Gate — Story Readiness Review + +Apply the review mode resolved in Phase 0 before spawning QL-STORY-READY: + +- `solo` → skip. Note: "QL-STORY-READY skipped — Solo mode." Proceed to close. +- `lean` → skip. Note: "QL-STORY-READY skipped — Lean mode." Proceed to close. +- `full` → spawn as normal. + +Spawn `qa-lead` via Task using gate **QL-STORY-READY** (`.claude/docs/director-gates.md`). + +Pass the following context: +- Story title +- Acceptance criteria list (all items from the story's acceptance criteria section) +- Dependency status (all dependencies listed and their current state: exist / DRAFT / missing) +- Overall verdict (READY / NEEDS WORK / BLOCKED) from Section 4 (Verdict Assignment) + +Handle the verdict per standard rules in `director-gates.md`: +- **ADEQUATE** → story is cleared.
Proceed to close. +- **GAPS [list]** → surface the specific gaps to the user via `AskUserQuestion`: + options: `Update story with suggested gaps` / `Accept and proceed anyway` / `Discuss further`. +- **INADEQUATE** → surface the specific gaps; ask user whether to update the story or proceed anyway. + +--- + +## Recommended Next Steps + +- Run `/dev-story [story-path]` to begin implementation once the story is READY +- Run `/story-readiness sprint` to check all stories in the current sprint at once +- Run `/create-stories [epic-slug]` if a story file is missing entirely diff --git a/.omc/skills/team-audio/SKILL.md b/.omc/skills/team-audio/SKILL.md new file mode 100644 index 0000000..f719864 --- /dev/null +++ b/.omc/skills/team-audio/SKILL.md @@ -0,0 +1,129 @@ +--- +name: team-audio +description: "Orchestrate audio team: audio-director + sound-designer + technical-artist + gameplay-programmer for full audio pipeline from direction to implementation." +argument-hint: "[feature or area to design audio for]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- + +If no argument is provided, output usage guidance and exit without spawning any agents: +> Usage: `/team-audio [feature or area]` — specify the feature or area to design audio for (e.g., `combat`, `main menu`, `forest biome`, `boss encounter`). Do not use `AskUserQuestion` here; output the guidance directly. + +When this skill is invoked with an argument, orchestrate the audio team through a structured pipeline. + +**Decision Points:** At each step transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next step. + +1. **Read the argument** for the target feature or area (e.g., `combat`, + `main menu`, `forest biome`, `boss encounter`). + +2. 
**Gather context**: + - Read relevant design docs in `design/gdd/` for the feature + - Read the sound bible at `design/gdd/sound-bible.md` if it exists + - Read existing audio asset lists in `assets/audio/` + - Read any existing sound design docs for this area + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: audio-director` — Sonic identity, emotional tone, audio palette +- `subagent_type: sound-designer` — SFX specifications, audio events, mixing groups +- `subagent_type: technical-artist` — Audio middleware, bus structure, memory budgets +- `subagent_type: accessibility-specialist` — Audio accessibility: subtitle requirements, visual alternatives for critical audio cues +- `subagent_type: [primary engine specialist]` — Validate audio integration patterns for the engine +- `subagent_type: gameplay-programmer` — Audio manager, gameplay triggers, adaptive music + +Always provide full context in each agent's prompt (feature description, existing audio assets, design doc references). + +3. **Orchestrate the audio team** in sequence: + +### Step 1: Audio Direction (audio-director) +Spawn the `audio-director` agent to: +- Define the sonic identity for this feature/area +- Specify the emotional tone and audio palette +- Set music direction (adaptive layers, stems, transitions) +- Define audio priorities and mix targets +- Establish any adaptive audio rules (combat intensity, exploration, tension) + +### Step 2: Sound Design and Audio Accessibility (parallel) +Spawn the `sound-designer` agent to: +- Create detailed SFX specifications for every audio event +- Define sound categories (ambient, UI, gameplay, music, dialogue) +- Specify per-sound parameters (volume range, pitch variation, attenuation) +- Plan audio event list with trigger conditions +- Define mixing groups and ducking rules + +Spawn the `accessibility-specialist` agent in parallel to: +- Identify which audio events carry critical gameplay information (damage received, enemy nearby, objective complete) and require visual alternatives for hearing-impaired players +- Specify subtitle
requirements: which audio events need captions, what text format, on-screen duration +- Check that no gameplay state is communicated by audio alone (all must have a visual fallback) +- Review the audio event list for any that could cause issues for players with auditory sensitivities (high-frequency alerts, sudden loud events) +- Output: audio accessibility requirements list integrated into the audio event spec + +### Step 3: Technical Implementation (parallel) +Spawn the `technical-artist` agent to: +- Design the audio middleware integration (Wwise/FMOD/native) +- Define audio bus structure and routing +- Specify memory budgets for audio assets per platform +- Plan streaming vs preloaded asset strategy +- Design any audio-reactive visual effects + +Spawn the **primary engine specialist** in parallel (from `.claude/docs/technical-preferences.md` Engine Specialists) to validate the integration approach: +- Is the proposed audio middleware integration idiomatic for the engine? (e.g., Godot's built-in AudioStreamPlayer vs FMOD, Unity's Audio Mixer vs Wwise, Unreal's MetaSounds vs FMOD) +- Any engine-specific audio node/component patterns that should be used? +- Known audio system changes in the pinned engine version that affect the integration plan? +- Output: engine audio integration notes to merge with the technical-artist's plan + +If no engine is configured, skip the specialist spawn. + +### Step 4: Code Integration (gameplay-programmer) +Spawn the `gameplay-programmer` agent to: +- Implement audio manager system or review existing +- Wire up audio events to gameplay triggers +- Implement adaptive music system (if specified) +- Set up audio occlusion/reverb zones +- Write unit tests for audio event triggers + +4. **Compile the audio design document** combining all team outputs. + +5. **Save to** `design/gdd/audio-[feature].md`. + +6. 
**Output a summary** with: audio event count, estimated asset count, + implementation tasks, and any open questions between team members. + +Verdict: **COMPLETE** — audio design document produced and team pipeline finished. + +If the pipeline stops because a dependency is unresolved (e.g., critical accessibility gap or missing GDD not resolved by the user): + +Verdict: **BLOCKED** — [reason] + +## File Write Protocol + +All file writes (audio design docs, SFX specs, implementation files) are delegated +to sub-agents spawned via Task. Each sub-agent enforces the "May I write to [path]?" +protocol. This orchestrator does not write files directly. + +## Next Steps + +- Review the audio design doc with the audio-director before implementation begins. +- Use `/dev-story` to implement the audio manager and event system once the design is approved. +- Run `/asset-audit` after audio assets are created to verify naming and format compliance. + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. 
+ +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess diff --git a/.omc/skills/team-combat/SKILL.md b/.omc/skills/team-combat/SKILL.md new file mode 100644 index 0000000..f08bcb6 --- /dev/null +++ b/.omc/skills/team-combat/SKILL.md @@ -0,0 +1,120 @@ +--- +name: team-combat +description: "Orchestrate the combat team: coordinates game-designer, gameplay-programmer, ai-programmer, technical-artist, sound-designer, and qa-tester to design, implement, and validate a combat feature end-to-end." +argument-hint: "[combat feature description]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- +**Argument check:** If no combat feature description is provided, output: +> "Usage: `/team-combat [combat feature description]` — Provide a description of the combat feature to design and implement (e.g., `melee parry system`, `ranged weapon spread`)." +Then stop immediately without spawning any subagents or reading any files. + +When this skill is invoked with a valid argument, orchestrate the combat team through a structured pipeline. + +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. 
+ +## Team Composition +- **game-designer** — Design the mechanic, define formulas and edge cases +- **gameplay-programmer** — Implement the core gameplay code +- **ai-programmer** — Implement NPC/enemy AI behavior for the feature +- **technical-artist** — Create VFX, shader effects, and visual feedback +- **sound-designer** — Define audio events, impact sounds, and ambient combat audio +- **engine specialist** (primary) — Validate architecture and implementation patterns are idiomatic for the engine (read from `.claude/docs/technical-preferences.md` Engine Specialists section) +- **qa-tester** — Write test cases and validate the implementation + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: game-designer` — Design the mechanic, define formulas and edge cases +- `subagent_type: gameplay-programmer` — Implement the core gameplay code +- `subagent_type: ai-programmer` — Implement NPC/enemy AI behavior +- `subagent_type: technical-artist` — Create VFX, shader effects, visual feedback +- `subagent_type: sound-designer` — Define audio events, impact sounds, ambient audio +- `subagent_type: [primary engine specialist]` — Engine idiom validation for architecture and implementation +- `subagent_type: qa-tester` — Write test cases and validate implementation + +Always provide full context in each agent's prompt (design doc path, relevant code files, constraints). Launch independent agents in parallel where the pipeline allows it (e.g., Phase 3 agents can run simultaneously). 
+ +## Pipeline + +### Phase 1: Design +Delegate to **game-designer**: +- Create or update the design document in `design/gdd/` covering: mechanic overview, player fantasy, detailed rules, formulas with variable definitions, edge cases, dependencies, tuning knobs with safe ranges, and acceptance criteria +- Output: completed design document + +### Phase 2: Architecture +Delegate to **gameplay-programmer** (with **ai-programmer** if AI is involved): +- Review the design document +- Design the code architecture: class structure, interfaces, data flow +- Identify integration points with existing systems +- Output: architecture sketch with file list and interface definitions + +Then spawn the **primary engine specialist** to validate the proposed architecture: +- Is the class/node/component structure idiomatic for the pinned engine? (e.g., Godot node hierarchy, Unity MonoBehaviour vs DOTS, Unreal Actor/Component design) +- Are there engine-native systems that should be used instead of custom implementations? +- Any proposed APIs that are deprecated or changed in the pinned engine version? 
+- Output: engine architecture notes — incorporate into the architecture before Phase 3 begins + +### Phase 3: Implementation (parallel where possible) +Delegate in parallel: +- **gameplay-programmer**: Implement core combat mechanic code +- **ai-programmer**: Implement AI behaviors (if the feature involves NPC reactions) +- **technical-artist**: Create VFX and shader effects +- **sound-designer**: Define audio event list and mixing notes + +### Phase 4: Integration +- Wire together gameplay code, AI, VFX, and audio +- Ensure all tuning knobs are exposed and data-driven +- Verify the feature works with existing combat systems + +### Phase 5: Validation +Delegate to **qa-tester**: +- Write test cases from the acceptance criteria +- Test all edge cases documented in the design +- Verify performance impact is within budget +- File bug reports for any issues found + +### Phase 6: Sign-off +- Collect results from all team members +- Report feature status: COMPLETE / NEEDS WORK / BLOCKED +- List any outstanding issues and their assigned owners + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. 
+ +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess + +## File Write Protocol + +All file writes (design documents, implementation files, test cases) are +delegated to sub-agents spawned via Task. Each sub-agent enforces the +"May I write to [path]?" protocol. This orchestrator does not write files directly. + +## Output + +A summary report covering: design completion status, implementation status per team member, test results, and any open issues. + +Verdict: **COMPLETE** — combat feature designed, implemented, and validated. +Verdict: **BLOCKED** — one or more phases could not complete; partial report produced with unresolved items listed. + +## Next Steps + +- Run `/code-review` on the implemented combat code before closing stories. +- Run `/balance-check` to validate combat formulas and tuning values. +- Run `/team-polish` if VFX, audio, or performance polish is needed. diff --git a/.omc/skills/team-level/SKILL.md b/.omc/skills/team-level/SKILL.md new file mode 100644 index 0000000..b5dc161 --- /dev/null +++ b/.omc/skills/team-level/SKILL.md @@ -0,0 +1,175 @@ +--- +name: team-level +description: "Orchestrate level design team: level-designer + narrative-director + world-builder + art-director + systems-designer + qa-tester for complete area/level creation." +argument-hint: "[level name or area to design]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- + +When this skill is invoked: + +**Decision Points:** At each step transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. 
Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next step. + +1. **Read the argument** for the target level or area (e.g., `tutorial`, + `forest dungeon`, `hub town`, `final boss arena`). + +2. **Gather context**: + - Read the game concept at `design/gdd/game-concept.md` + - Read game pillars at `design/gdd/game-pillars.md` + - Read existing level docs in `design/levels/` + - Read relevant narrative docs in `design/narrative/` + - Read world-building docs for the area's region/faction + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: narrative-director` — Narrative purpose, characters, emotional arc +- `subagent_type: world-builder` — Lore context, environmental storytelling, world rules +- `subagent_type: level-designer` — Spatial layout, pacing, encounters, navigation +- `subagent_type: systems-designer` — Enemy compositions, loot tables, difficulty balance +- `subagent_type: art-director` — Visual theme, color palette, lighting, asset requirements +- `subagent_type: accessibility-specialist` — Navigation clarity, colorblind safety, cognitive load +- `subagent_type: qa-tester` — Test cases, boundary testing, playtest checklist + +Always provide full context in each agent's prompt (game concept, pillars, existing level docs, narrative docs). + +3. **Orchestrate the level design team** in sequence: + +### Step 1: Narrative + Visual Direction (narrative-director + world-builder + art-director, parallel) + +Spawn all three agents simultaneously — issue all three Task calls before waiting for any result. + +Spawn the `narrative-director` agent to: +- Define the narrative purpose of this area (what story beats happen here?) +- Identify key characters, dialogue triggers, and lore elements +- Specify emotional arc (how should the player feel entering, during, leaving?) 
+ +Spawn the `world-builder` agent to: +- Provide lore context for the area (history, faction presence, ecology) +- Define environmental storytelling opportunities +- Specify any world rules that affect gameplay in this area + +Spawn the `art-director` agent to: +- Establish visual theme targets for this area — these are INPUTS to layout, not outputs of it +- Define the color temperature and lighting mood for this area (how does it differ from adjacent areas?) +- Specify shape language direction (angular fortress? organic cave? decayed grandeur?) +- Name the primary visual landmarks that will orient the player +- Read `design/art/art-bible.md` if it exists — anchor all direction in the established art bible + +**The art-director's visual targets from Step 1 must be passed to the level-designer in Step 2** as explicit constraints. Layout decisions happen within the visual direction, not before it. + +**Gate**: Use `AskUserQuestion` to present all three Step 1 outputs (narrative brief, lore foundation, visual direction targets) and confirm before proceeding to Step 2. 
+ +### Step 2: Layout and Encounter Design (level-designer) +Spawn the `level-designer` agent with the full Step 1 output as context: +- Narrative brief (from narrative-director) +- Lore foundation (from world-builder) +- **Visual direction targets (from art-director)** — layout must work within these targets, not contradict them + +The level-designer should: +- Design the spatial layout (critical path, optional paths, secrets) — ensuring primary routes align with the visual landmark targets from Step 1 +- Define pacing curve (tension peaks, rest areas, exploration zones) — coordinated with the emotional arc from narrative-director +- Place encounters with difficulty progression +- Design environmental puzzles or navigation challenges +- Define points of interest and landmarks for wayfinding — these must match the visual landmarks the art-director specified +- Specify entry/exit points and connections to adjacent areas + +**Adjacent area dependency check**: After the layout is produced, check `design/levels/` for each adjacent area referenced by the level-designer. If any referenced area's `.md` file does not exist, surface the gap: +> "Level references [area-name] as an adjacent area but `design/levels/[area-name].md` does not exist." + +Use `AskUserQuestion` with options: +- (a) Proceed with a placeholder reference — mark the connection as UNRESOLVED in the level doc and list it in the open cross-level dependencies section of the summary report +- (b) Pause and run `/team-level [area-name]` first to establish that area + +Do NOT invent content for the missing adjacent area. + +**Gate**: Use `AskUserQuestion` to present Step 2 layout (including any unresolved adjacent area dependencies) and confirm before proceeding to Step 3. 
+ +### Step 3: Systems Integration (systems-designer) +Spawn the `systems-designer` agent to: +- Specify enemy compositions and encounter formulas +- Define loot tables and reward placement +- Balance difficulty relative to expected player level/gear +- Design any area-specific mechanics or environmental hazards +- Specify resource distribution (health pickups, save points, shops) + +**Gate**: Use `AskUserQuestion` to present Step 3 outputs and confirm before proceeding to Step 4. + +### Step 4: Production Concepts + Accessibility (art-director + accessibility-specialist, parallel) + +**Note**: The art-director's directional pass (visual theme, color targets, mood) happened in Step 1. This pass is location-specific production concepts — given the finalized layout, what does each specific space look like? + +Spawn the `art-director` agent with the finalized layout from Step 2: +- Produce location-specific concept specs for key spaces (entrance, key encounter zones, landmarks, exits) +- Specify which art assets are unique to this area vs. shared from the global pool +- Define sight-line and lighting setups per key space (these are now layout-informed, not directional) +- Specify VFX needs that are specific to this area's layout (weather volumes, particles, atmospheric effects) +- Flag any locations where the layout creates visual direction conflicts with the Step 1 targets — surface these as production risks + +Spawn the `accessibility-specialist` agent in parallel to: +- Review the level layout for navigation clarity (can players orient themselves without relying on color alone?) 
+- Check that critical path signposting uses shape/icon/sound cues in addition to color +- Review any puzzle mechanics for cognitive load — flag anything that requires holding more than 3 simultaneous states +- Check that key gameplay areas have sufficient contrast for colorblind players +- Output: accessibility concerns list with severity (BLOCKING / RECOMMENDED / NICE TO HAVE) + +Wait for both agents to return before proceeding. + +**Gate**: Use `AskUserQuestion` to present both Step 4 results. If the accessibility-specialist returned any BLOCKING concerns, highlight them prominently and offer: +- (a) Return to level-designer and art-director to redesign the flagged elements before Step 5 +- (b) Document as a known accessibility gap and proceed to Step 5 with the concern explicitly logged in the final report + +Do NOT proceed to Step 5 without the user acknowledging any BLOCKING accessibility concerns. + +### Step 5: QA Planning (qa-tester) +Spawn the `qa-tester` agent to: +- Write test cases for the critical path +- Identify boundary and edge cases (sequence breaks, softlocks) +- Create a playtest checklist for the area +- Define acceptance criteria for level completion + +4. **Compile the level design document** combining all team outputs into the + level design template format. + +5. **Save to** `design/levels/[level-name].md`. + +6. **Output a summary** with: area overview, encounter count, estimated asset + list, narrative beats, any cross-team dependencies or open questions, open + cross-level dependencies (adjacent areas referenced but not yet designed, each + marked UNRESOLVED), and accessibility concerns with their resolution status. + +## File Write Protocol + +All file writes (level design docs, narrative docs, test checklists) are delegated +to sub-agents spawned via Task. Each sub-agent enforces the "May I write to [path]?" +protocol. This orchestrator does not write files directly. 
+ +Verdict: **COMPLETE** — level design document produced and all team outputs compiled. +Verdict: **BLOCKED** — one or more agents blocked; partial report produced with unresolved items listed. + +## Next Steps + +- Run `/design-review design/levels/[level-name].md` to validate the completed level design doc. +- Run `/dev-story` to implement level content once the design is approved. +- Run `/qa-plan` to generate a QA test plan for this level. + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. + +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess diff --git a/.omc/skills/team-live-ops/SKILL.md b/.omc/skills/team-live-ops/SKILL.md new file mode 100644 index 0000000..c4f6e7f --- /dev/null +++ b/.omc/skills/team-live-ops/SKILL.md @@ -0,0 +1,145 @@ +--- +name: team-live-ops +description: "Orchestrate the live-ops team for post-launch content planning: coordinates live-ops-designer, economy-designer, analytics-engineer, community-manager, writer, and narrative-director to design and plan a season, event, or live content update." 
+argument-hint: "[season name or event description]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- +**Argument check:** If no season name or event description is provided, output: +> "Usage: `/team-live-ops [season name or event description]` — Provide the name or description of the season or live event to plan." +Then stop immediately without spawning any subagents or reading any files. + +When this skill is invoked with a valid argument, orchestrate the live-ops team through a structured planning pipeline. + +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. + +## Team Composition +- **live-ops-designer** — Season structure, event cadence, retention mechanics, battle pass +- **economy-designer** — Live economy balance, store rotation, currency pricing, pity timers +- **analytics-engineer** — Success metrics, A/B test design, event tracking, dashboard specs +- **community-manager** — Player-facing announcements, event descriptions, seasonal messaging +- **narrative-director** — Seasonal narrative theme, story arc, world event framing +- **writer** — Event descriptions, reward item names, seasonal flavor text, announcement copy + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: live-ops-designer` — Season/event structure and retention mechanics +- `subagent_type: economy-designer` — Live economy balance and reward pricing +- `subagent_type: analytics-engineer` — Success metrics, A/B tests, event instrumentation +- `subagent_type: community-manager` — Player-facing communication and messaging +- `subagent_type: narrative-director` — Seasonal theme and narrative framing +- `subagent_type: writer` — All 
player-facing text: event descriptions, item names, copy + +Always provide full context in each agent's prompt (game concept path, existing season docs, ethics policy path, current economy state). Launch independent agents in parallel where the pipeline allows it (Phases 3 and 4 can run simultaneously). + +## Pipeline + +### Phase 1: Season/Event Scoping +Delegate to **live-ops-designer**: +- Define the season or event: type (seasonal, limited-time event, challenge), duration, theme direction +- Outline the content list: what's new (modes, items, challenges, story beats) +- Define the retention hook: what brings players back daily/weekly during this season +- Identify resource budget: how much new content needs to be created vs. reused +- Output: season brief with scope, content list, and retention mechanic overview + +### Phase 2: Narrative Theme +Delegate to **narrative-director**: +- Read the season brief from Phase 1 +- Design the seasonal narrative theme: how does this event connect to the game world? 
+- Define the central story hook players will discover during the event +- Identify which existing lore threads this season can advance +- Output: narrative framing document (theme, story hook, lore connections) + +### Phase 3: Economy Design (parallel with Phase 2 if theme is clear) +Delegate to **economy-designer**: +- Read the season brief and existing economy rules from `design/live-ops/economy-rules.md` +- Design the reward track: free tier progression, premium tier value proposition +- Plan the in-season economy: seasonal currency, store rotation, pricing +- Define pity timer mechanics and bad-luck protection for any random elements +- Verify no pay-to-win items in premium track +- Output: economy design doc with reward tables, pricing, and currency flow + +### Phase 4: Analytics and Success Metrics (parallel with Phase 3) +Delegate to **analytics-engineer**: +- Read the season brief +- Define success metrics: participation rate target, retention lift target, battle pass completion rate +- Design any A/B tests to run during the season (e.g., different reward cadences) +- Specify new telemetry events needed for this season's content +- Output: analytics plan with success criteria and instrumentation requirements + +### Phase 5: Content Writing (parallel) +Delegate in parallel: +- **narrative-director** (if needed): Write any in-game narrative text (cutscene scripts, NPC dialogue, world event descriptions) for the season +- **writer**: Write all player-facing text — event names, reward item descriptions, challenge objective text, seasonal flavor text +- Both should read the narrative framing doc from Phase 2 + +### Phase 6: Player Communication Plan +Delegate to **community-manager**: +- Read the season brief, economy design, and narrative framing +- Draft the season launch announcement (tone, key highlights, platform-specific versions) +- Plan the communication cadence: pre-launch teaser, launch day post, mid-season reminder, final week FOMO push +- Draft 
known-issues section placeholder for day-1 patch notes +- Output: communication calendar with draft copy for each touchpoint + +### Phase 7: Review and Sign-off +Collect outputs from all phases and present a consolidated season plan: +- Season brief (Phase 1) +- Narrative framing (Phase 2) +- Economy design and reward tables (Phase 3) +- Analytics plan and success metrics (Phase 4) +- Written content inventory (Phase 5) +- Communication calendar (Phase 6) + +Present a summary to the user with: +- **Content scope**: what is being created +- **Economy health check**: does the reward track feel fair and non-predatory? +- **Analytics readiness**: are success criteria defined and instrumented? +- **Ethics review**: check the Phase 3 economy design against `design/live-ops/ethics-policy.md` + - If the file does not exist: flag "ETHICS REVIEW SKIPPED: `design/live-ops/ethics-policy.md` not found. Economy design was not reviewed against an ethics policy. Recommend creating one before production begins." Include this flag in the season design output document. Add to next steps: create `design/live-ops/ethics-policy.md`. + - If the file exists and a violation is found: flag "ETHICS FLAG: [element] in Phase 3 economy design violates [policy rule]. Approval is blocked until this is resolved." Do NOT issue a COMPLETE verdict or write output documents. Use `AskUserQuestion` with options: revise economy design / override with documented rationale / cancel. If user chooses to revise: re-spawn economy-designer to produce a corrected design, then return to Phase 7 review. +- **Open questions**: decisions still needed before production begins + +Ask the user to approve the season plan before delegating to production teams. Issue the COMPLETE verdict only after the user approves and no unresolved ethics violations remain. If an ethics violation is unresolved, end with Verdict: **BLOCKED**. 
+ +## Output Documents + +All documents save to `design/live-ops/`: +- `seasons/S[N]_[name].md` — Season design document (from Phase 1-3) +- `seasons/S[N]_[name]_analytics.md` — Analytics plan (from Phase 4) +- `seasons/S[N]_[name]_comms.md` — Communication calendar (from Phase 6) + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. + +If a BLOCKED state is unresolvable, end with Verdict: **BLOCKED** instead of COMPLETE. + +## File Write Protocol + +All file writes (season design docs, analytics plans, communication calendars) are +delegated to sub-agents spawned via Task. Each sub-agent enforces the +"May I write to [path]?" protocol. This orchestrator does not write files directly. + +## Output + +A summary covering: season theme and scope, economy design highlights, success metrics, content list, communication plan, and any open decisions needing user input before production. + +Verdict: **COMPLETE** — season plan produced and handed off for production. + +## Next Steps + +- Run `/design-review` on the season design document for consistency validation. +- Run `/sprint-plan` to schedule content creation work for the season. +- Run `/team-release` when the season content is ready to deploy. 
diff --git a/.omc/skills/team-narrative/SKILL.md b/.omc/skills/team-narrative/SKILL.md new file mode 100644 index 0000000..373ad22 --- /dev/null +++ b/.omc/skills/team-narrative/SKILL.md @@ -0,0 +1,111 @@ +--- +name: team-narrative +description: "Orchestrate the narrative team: coordinates narrative-director, writer, world-builder, and level-designer to create cohesive story content, world lore, and narrative-driven level design." +argument-hint: "[narrative content description]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Task, AskUserQuestion, TodoWrite +--- +If no argument is provided, output usage guidance and exit without spawning any agents: +> Usage: `/team-narrative [narrative content description]` — describe the story content, scene, or narrative area to work on (e.g., `boss encounter cutscene`, `faction intro dialogue`, `tutorial narrative`). Do not use `AskUserQuestion` here; output the guidance directly. + +When this skill is invoked with an argument, orchestrate the narrative team through a structured pipeline. + +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. 
+ +## Team Composition +- **narrative-director** — Story arcs, character design, dialogue strategy, narrative vision +- **writer** — Dialogue writing, lore entries, item descriptions, in-game text +- **world-builder** — World rules, faction design, history, geography, environmental storytelling +- **art-director** — Character visual design, environmental visual storytelling, cutscene/cinematic tone +- **level-designer** — Level layouts that serve the narrative, pacing, environmental storytelling beats + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: narrative-director` — Story arcs, character design, narrative vision +- `subagent_type: writer` — Dialogue writing, lore entries, in-game text +- `subagent_type: world-builder` — World rules, faction design, history, geography +- `subagent_type: art-director` — Character visual profiles, environmental visual storytelling, cinematic tone +- `subagent_type: level-designer` — Level layouts that serve the narrative, pacing +- `subagent_type: localization-lead` — i18n validation, string key compliance, translation headroom + +Always provide full context in each agent's prompt (narrative brief, lore dependencies, character profiles). Launch independent agents in parallel where the pipeline allows it (e.g., Phase 2 agents can run simultaneously). + +## Pipeline + +### Phase 1: Narrative Direction +Delegate to **narrative-director**: +- Define the narrative purpose of this content: what story beat does it serve? 
+- Identify characters involved, their motivations, and how this fits the overall arc +- Set the emotional tone and pacing targets +- Specify any lore dependencies or new lore this introduces +- Output: narrative brief with story requirements + +### Phase 2: World Foundation (parallel) +Delegate in parallel — issue all three Task calls simultaneously before waiting for any result: +- **world-builder**: Create or update lore entries for factions, locations, and history relevant to this content. Cross-reference against existing lore for contradictions. Set canon level for new entries. +- **writer**: Draft character dialogue using voice profiles. Ensure all lines are under 120 characters, use named placeholders for variables, and are localization-ready. +- **art-director**: Define character visual design direction for key characters appearing in this content (silhouette, visual archetype, distinguishing features). Specify environmental visual storytelling elements for each key space (prop composition, lighting notes, spatial arrangement). Define tone palette and cinematic direction for any cutscenes or scripted sequences. 
+ +### Phase 3: Level Narrative Integration +Delegate to **level-designer**: +- Review the narrative brief and lore foundation +- Design environmental storytelling elements in the level +- Place narrative triggers, dialogue zones, and discovery points +- Ensure pacing serves both gameplay and story + +### Phase 4: Review and Consistency +Delegate to **narrative-director**: +- Review all dialogue against character voice profiles +- Verify lore consistency across new and existing entries +- Confirm narrative pacing aligns with level design +- Check that all mysteries have documented "true answers" + +### Phase 5: Polish (parallel) +Delegate in parallel: +- **writer**: Final self-review — verify no line exceeds dialogue box constraints, all text uses string keys (not raw strings), placeholder variable names are consistent +- **localization-lead**: Validate i18n compliance — check string key naming conventions, flag any strings with hardcoded formatting that won't survive translation, verify character limit headroom for languages that expand (German/Finnish typically +30%), confirm no cultural assumptions in text that would need locale-specific variants +- **world-builder**: Finalize canon levels for all new lore entries + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. 
+ +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess + +## File Write Protocol + +All file writes (narrative docs, dialogue files, lore entries) are delegated to +sub-agents spawned via Task. Each sub-agent enforces the "May I write to [path]?" +protocol. This orchestrator does not write files directly. + +## Output + +A summary report covering: narrative brief status, lore entries created/updated, dialogue lines written, level narrative integration points, consistency review results, and any unresolved contradictions. + +Verdict: **COMPLETE** — narrative content delivered. + +If the pipeline stops because a dependency is unresolved (e.g., lore contradiction or missing prerequisite not resolved by the user): + +Verdict: **BLOCKED** — [reason] + +## Next Steps + +- Run `/design-review` on the narrative documents for consistency validation. +- Run `/localize extract` to extract new strings for translation after dialogue is finalized. +- Run `/dev-story` to implement dialogue triggers and narrative events in-engine. diff --git a/.omc/skills/team-polish/SKILL.md b/.omc/skills/team-polish/SKILL.md new file mode 100644 index 0000000..5cdd6ca --- /dev/null +++ b/.omc/skills/team-polish/SKILL.md @@ -0,0 +1,124 @@ +--- +name: team-polish +description: "Orchestrate the polish team: coordinates performance-analyst, technical-artist, sound-designer, and qa-tester to optimize, polish, and harden a feature or area for release quality." 
+argument-hint: "[feature or area to polish]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- +If no argument is provided, output usage guidance and exit without spawning any agents: +> Usage: `/team-polish [feature or area]` — specify the feature or area to polish (e.g., `combat`, `main menu`, `inventory system`, `level-1`). Do not use `AskUserQuestion` here; output the guidance directly. + +When this skill is invoked with an argument, orchestrate the polish team through a structured pipeline. + +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. + +## Team Composition +- **performance-analyst** — Profiling, optimization, memory analysis, frame budget +- **engine-programmer** — Engine-level bottlenecks: rendering pipeline, memory, resource loading (invoke when performance-analyst identifies low-level root causes) +- **technical-artist** — VFX polish, shader optimization, visual quality +- **sound-designer** — Audio polish, mixing, ambient layers, feedback sounds +- **tools-programmer** — Content pipeline tool verification, editor tool stability, automation fixes (invoke when content authoring tools are involved in the polished area) +- **qa-tester** — Edge case testing, regression testing, soak testing + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: performance-analyst` — Profiling, optimization, memory analysis +- `subagent_type: engine-programmer` — Engine-level fixes for rendering, memory, resource loading +- `subagent_type: technical-artist` — VFX polish, shader optimization, visual quality +- `subagent_type: sound-designer` — Audio polish, mixing, ambient layers +- `subagent_type: tools-programmer` — 
Content pipeline and editor tool verification +- `subagent_type: qa-tester` — Edge case testing, regression testing, soak testing + +Always provide full context in each agent's prompt (target feature/area, performance budgets, known issues). Launch independent agents in parallel where the pipeline allows it (e.g., Phases 3 and 4 can run simultaneously). + +## Pipeline + +### Phase 1: Assessment +Delegate to **performance-analyst**: +- Profile the target feature/area using `/perf-profile` +- Identify performance bottlenecks and frame budget violations +- Measure memory usage and check for leaks +- Benchmark against target hardware specs +- Output: performance report with prioritized optimization list + +### Phase 2: Optimization +Delegate to **performance-analyst** (with relevant programmers as needed): +- Fix performance hotspots identified in Phase 1 +- Optimize draw calls, reduce overdraw +- Fix memory leaks and reduce allocation pressure +- Verify optimizations don't change gameplay behavior +- Output: optimized code with before/after metrics + +If Phase 1 identified engine-level root causes (rendering pipeline, resource loading, memory allocator), delegate those fixes to **engine-programmer** in parallel: +- Optimize hot paths in engine systems +- Fix allocation pressure in core loops +- Output: engine-level fixes with profiler validation + +### Phase 3: Visual Polish (parallel with Phase 2) +Delegate to **technical-artist**: +- Review VFX for quality and consistency with art bible +- Optimize particle systems and shader effects +- Add screen shake, camera effects, and visual juice where appropriate +- Ensure effects degrade gracefully on lower settings +- Output: polished visual effects + +### Phase 4: Audio Polish (parallel with Phase 2) +Delegate to **sound-designer**: +- Review audio events for completeness (are any actions missing sound feedback?) 
+- Check audio mix levels — nothing too loud or too quiet relative to the mix +- Add ambient audio layers for atmosphere +- Verify audio plays correctly with spatial positioning +- Output: audio polish list and mixing notes + +### Phase 5: Hardening +Delegate to **qa-tester**: +- Test all edge cases: boundary conditions, rapid inputs, unusual sequences +- Soak test: run the feature for extended periods checking for degradation +- Stress test: maximum entities, worst-case scenarios +- Regression test: verify polish changes haven't broken existing functionality +- Test on minimum spec hardware (if available) +- Output: test results with any remaining issues + +### Phase 6: Sign-off +- Collect results from all team members +- Compare performance metrics against budgets +- Report: READY FOR RELEASE / NEEDS MORE WORK +- List any remaining issues with severity and recommendations + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. 
+ +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess + +## File Write Protocol + +All file writes (performance reports, test results, evidence docs) are delegated to +sub-agents spawned via Task. Each sub-agent enforces the "May I write to [path]?" +protocol. This orchestrator does not write files directly. + +## Output + +A summary report covering: performance before/after metrics, visual polish changes, audio polish changes, test results, and release readiness assessment. + +## Next Steps + +- If READY FOR RELEASE: run `/release-checklist` for the final pre-release validation. +- If NEEDS MORE WORK: schedule remaining issues in `/sprint-plan update` and re-run `/team-polish` after fixes. +- Run `/gate-check` for a formal phase gate verdict before handing off to release. diff --git a/.omc/skills/team-qa/SKILL.md b/.omc/skills/team-qa/SKILL.md new file mode 100644 index 0000000..f8ba570 --- /dev/null +++ b/.omc/skills/team-qa/SKILL.md @@ -0,0 +1,222 @@ +--- +name: team-qa +description: "Orchestrate the QA team through a full testing cycle. Coordinates qa-lead (strategy + test plan) and qa-tester (test case writing + bug reporting) to produce a complete QA package for a sprint or feature. Covers: test plan generation, test case writing, smoke check gate, manual QA execution, and sign-off report." +argument-hint: "[sprint | feature: system-name]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Task, AskUserQuestion +agent: qa-lead +--- + +When this skill is invoked, orchestrate the QA team through a structured testing cycle. 
+ +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. + +## Team Composition + +- **qa-lead** — QA strategy, test plan generation, story classification, sign-off report +- **qa-tester** — Test case writing, bug report writing, manual QA documentation + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: qa-lead` — Strategy, planning, classification, sign-off +- `subagent_type: qa-tester` — Test case writing and bug report writing + +Always provide full context in each agent's prompt (story file paths, QA plan path, scope constraints). Launch independent qa-tester tasks in parallel where possible (e.g., multiple stories in Phase 4 can be scaffolded simultaneously). + +## Pipeline + +### Phase 1: Load Context + +Before doing anything else, gather the full scope: + +1. Detect the current sprint or feature scope from the argument: + - If argument is a sprint identifier (e.g., `sprint-03`): read all story files in `production/sprints/[sprint]/` + - If argument is `feature: [system-name]`: glob story files tagged for that system + - If no argument: read `production/session-state/active.md` and `production/sprint-status.yaml` (if present) to infer the active sprint + +2. Read `production/stage.txt` to confirm the current project phase. + +3. Count stories found and report to the user: + > "QA cycle starting for [sprint/feature]. Found [N] stories. Current stage: [stage]. Ready to begin QA strategy?" + +### Phase 2: QA Strategy (qa-lead) + +Spawn `qa-lead` via Task to review all in-scope stories and produce a QA strategy. 
+ +Prompt the qa-lead to: +- Read each story file +- Classify each story by type: **Logic** / **Integration** / **Visual/Feel** / **UI** / **Config/Data** +- Identify which stories require automated test evidence vs. manual QA +- Flag any stories with missing acceptance criteria or missing test evidence that would block QA +- Estimate manual QA effort (number of test sessions needed) +- Check `tests/smoke/` for smoke test scenarios; for each, assess whether it can be verified given the current build. Produce a smoke check verdict: **PASS** / **PASS WITH WARNINGS [list]** / **FAIL [list of failures]** +- Produce a strategy summary table and smoke check result: + + | Story | Type | Automated Required | Manual Required | Blocker? | + |-------|------|--------------------|-----------------|----------| + + **Smoke Check**: [PASS / PASS WITH WARNINGS / FAIL] — [details if not PASS] + +If the smoke check result is **FAIL**, the qa-lead must list the failures prominently. QA cannot proceed past the strategy phase with a failed smoke check. + +Present the qa-lead's full strategy to the user, then use `AskUserQuestion`: + +``` +question: "QA Strategy Review" +options: + - "Looks good — proceed to test plan" + - "Adjust story types before proceeding" + - "Skip blocked stories and proceed with the rest" + - "Smoke check failed — fix issues and re-run /team-qa" + - "Cancel — resolve blockers first" +``` + +If smoke check **FAIL**: do not proceed to Phase 3. Surface the failures and stop. The user must fix them and re-run `/team-qa`. +If smoke check **PASS WITH WARNINGS**: note the warnings for the sign-off report and continue. +If blockers are present: list them explicitly. The user may choose to skip blocked stories or cancel the cycle. + +### Phase 3: Test Plan Generation + +Using the strategy from Phase 2, produce a structured test plan document. 
+ +The test plan should cover: +- **Scope**: sprint/feature name, story count, dates +- **Story Classification Table**: from Phase 2 strategy +- **Automated Test Requirements**: which stories need test files, expected paths in `tests/` +- **Manual QA Scope**: which stories need manual walkthrough and what to validate +- **Out of Scope**: what is explicitly not being tested this cycle and why +- **Entry Criteria**: what must be true before QA can begin (smoke check pass, build stable) +- **Exit Criteria**: what constitutes a completed QA cycle (all stories PASS or FAIL with bugs filed) + +Ask: "May I write the QA plan to `production/qa/qa-plan-[sprint]-[date].md`?" + +Write only after receiving approval. + +### Phase 4: Test Case Writing (qa-tester) + +> **Smoke check** is performed as part of Phase 2 (QA Strategy). If the smoke check returned FAIL in Phase 2, the cycle was stopped there. This phase only runs when the Phase 2 smoke check was PASS or PASS WITH WARNINGS. + +For each story requiring manual QA (Visual/Feel, UI, Integration without automated tests): + +Spawn `qa-tester` via Task for each story (run in parallel where possible), providing: +- The story file path +- The relevant section of the QA plan for that story +- The GDD acceptance criteria for the system being tested (if available) +- Instructions to write detailed test cases covering all acceptance criteria + +Each test case set should include: +- **Preconditions**: game state required before testing begins +- **Steps**: numbered, unambiguous actions +- **Expected Result**: what should happen +- **Actual Result**: field left blank for the tester to fill in +- **Pass/Fail**: field left blank + +Present the test cases to the user for review before execution. Group by story. + +Use `AskUserQuestion` per story group (batched 3-4 at a time): + +``` +question: "Test cases ready for [Story Group]. Review before manual QA begins?" 
+options: + - "Approved — begin manual QA for these stories" + - "Revise test cases for [story name]" + - "Skip manual QA for [story name] — not ready" +``` + +### Phase 5: Manual QA Execution + +Walk through each story in the approved manual QA list. + +Batch stories into groups of 3-4 and use `AskUserQuestion` for each: + +``` +question: "Manual QA — [Story Title]\n[brief description of what to test]" +options: + - "PASS — all acceptance criteria verified" + - "PASS WITH NOTES — minor issues found (describe after)" + - "FAIL — criteria not met (describe after)" + - "BLOCKED — cannot test yet (reason)" +``` + +After each FAIL result: use `AskUserQuestion` to collect the failure description, then spawn `qa-tester` via Task to write a formal bug report in `production/qa/bugs/`. + +Bug report naming: `BUG-[NNN]-[short-slug].md` (increment NNN from existing bugs in the directory). + +After collecting all results, summarize: +- Stories PASS: [count] +- Stories PASS WITH NOTES: [count] +- Stories FAIL: [count] — bugs filed: [IDs] +- Stories BLOCKED: [count] + +### Phase 6: QA Sign-Off Report + +Spawn `qa-lead` via Task to produce the sign-off report using all results from Phases 4–5. 
+ +The sign-off report format: + +```markdown +## QA Sign-Off Report: [Sprint/Feature] +**Date**: [date] +**QA Lead sign-off**: [pending] + +### Test Coverage Summary +| Story | Type | Auto Test | Manual QA | Result | +|-------|------|-----------|-----------|--------| +| [title] | Logic | PASS | — | PASS | +| [title] | Visual | — | PASS | PASS | + +### Bugs Found +| ID | Story | Severity | Status | +|----|-------|----------|--------| +| BUG-001 | [story] | S2 | Open | + +### Verdict: APPROVED / APPROVED WITH CONDITIONS / NOT APPROVED + +**Conditions** (if any): [list what must be fixed before the build advances] + +### Next Step +[guidance based on verdict] +``` + +Verdict rules: +- **APPROVED**: All stories PASS or PASS WITH NOTES; no S1/S2 bugs open +- **APPROVED WITH CONDITIONS**: S3/S4 bugs open, or PASS WITH NOTES issues documented; no S1/S2 bugs +- **NOT APPROVED**: Any S1/S2 bugs open; or stories FAIL without documented workaround + +Next step guidance by verdict: +- APPROVED: "Build is ready for the next phase. Run `/gate-check` to validate advancement." +- APPROVED WITH CONDITIONS: "Resolve conditions before advancing. S3/S4 bugs may be deferred to polish." +- NOT APPROVED: "Resolve S1/S2 bugs and re-run `/team-qa` or targeted manual QA before advancing." + +Ask: "May I write this QA sign-off report to `production/qa/qa-signoff-[sprint]-[date].md`?" + +Write only after receiving approval. + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. 
**Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. + +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess + +## Output + +A summary covering: stories in scope, smoke check result, manual QA results, bugs filed (with IDs and severities), and the final APPROVED / APPROVED WITH CONDITIONS / NOT APPROVED verdict. + +Verdict: **COMPLETE** — QA cycle finished. +Verdict: **BLOCKED** — smoke check failed or critical blocker prevented cycle completion; partial report produced. diff --git a/.omc/skills/team-release/SKILL.md b/.omc/skills/team-release/SKILL.md new file mode 100644 index 0000000..845199f --- /dev/null +++ b/.omc/skills/team-release/SKILL.md @@ -0,0 +1,148 @@ +--- +name: team-release +description: "Orchestrate the release team: coordinates release-manager, qa-lead, devops-engineer, and producer to execute a release from candidate to deployment." +argument-hint: "[version number or 'next']" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- +**Argument check:** If no version number is provided: +1. Read `production/session-state/active.md` and the most recent file in `production/milestones/` (if they exist) to infer the target version. +2. If a version is found: report "No version argument provided — inferred [version] from milestone data. Proceeding." Then confirm with `AskUserQuestion`: "Releasing [version]. Is this correct?" +3. 
If no version is discoverable: use `AskUserQuestion` to ask "What version number should be released? (e.g., v1.0.0)" and wait for user input before proceeding. Do NOT default to a hardcoded version string. + +When this skill is invoked, orchestrate the release team through a structured pipeline. + +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. + +## Team Composition +- **release-manager** — Release branch, versioning, changelog, deployment +- **qa-lead** — Test sign-off, regression suite, release quality gate +- **devops-engineer** — Build pipeline, artifacts, deployment automation +- **security-engineer** — Pre-release security audit (invoke if game has online/multiplayer features or player data) +- **analytics-engineer** — Verify telemetry events fire correctly and dashboards are live +- **community-manager** — Patch notes, launch announcement, player-facing messaging +- **producer** — Go/no-go decision, stakeholder communication, scheduling + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: release-manager` — Release branch, versioning, changelog, deployment +- `subagent_type: qa-lead` — Test sign-off, regression suite, release quality gate +- `subagent_type: devops-engineer` — Build pipeline, artifacts, deployment automation +- `subagent_type: security-engineer` — Security audit for online/multiplayer/data features +- `subagent_type: analytics-engineer` — Telemetry event verification and dashboard readiness +- `subagent_type: community-manager` — Patch notes and launch communication +- `subagent_type: producer` — Go/no-go decision, stakeholder communication +- `subagent_type: network-programmer` — Netcode stability sign-off (invoke if game has multiplayer) + 
+Always provide full context in each agent's prompt (version number, milestone status, known issues). Launch independent agents in parallel where the pipeline allows it (e.g., Phase 3 agents can run simultaneously). + +## Pipeline + +### Phase 1: Release Planning +Delegate to **producer**: +- Confirm all milestone acceptance criteria are met +- Identify any scope items deferred from this release +- Set the target release date and communicate to team +- Output: release authorization with scope confirmation + +### Phase 2: Release Candidate +Delegate to **release-manager**: +- Cut release branch from the agreed commit +- Bump version numbers in all relevant files +- Generate the release checklist using `/release-checklist` +- Freeze the branch — no feature changes, bug fixes only +- Output: release branch name and checklist + +### Phase 3: Quality Gate (parallel) +Delegate in parallel: +- **qa-lead**: Execute full regression test suite. Test all critical paths. Verify no S1/S2 bugs. Sign off on quality. +- **devops-engineer**: Build release artifacts for all target platforms. Verify builds are clean and reproducible. Run automated tests in CI. +- **security-engineer** *(if game has online features, multiplayer, or player data)*: Conduct pre-release security audit. Review authentication, anti-cheat, data privacy compliance. Sign off on security posture. +- **network-programmer** *(if game has multiplayer)*: Sign off on netcode stability. Verify lag compensation, reconnect handling, and bandwidth usage under load. + +### Phase 4: Localization, Performance, and Analytics +Delegate (can run in parallel with Phase 3 if resources available): +- Verify all strings are translated (delegate to **localization-lead** if available) +- Run performance benchmarks against targets (delegate to **performance-analyst** if available) +- **analytics-engineer**: Verify all telemetry events fire correctly on release build. Confirm dashboards are receiving data. 
Check that critical funnels (onboarding, progression, monetization if applicable) are instrumented. +- Output: localization, performance, and analytics sign-off + +### Phase 5: Go/No-Go +Delegate to **producer**: +- Collect sign-off from: qa-lead, release-manager, devops-engineer, security-engineer (if spawned in Phase 3), network-programmer (if spawned in Phase 3), and technical-director +- Evaluate any open issues — are they blocking or can they ship? +- Make the go/no-go call +- Output: release decision with rationale + +**If producer declares NO-GO:** +- Surface the decision immediately: "PRODUCER: NO-GO — [rationale, e.g., S1 bug found in Phase 3]." +- Use `AskUserQuestion` with options: + - Fix the blocker and re-run the affected phase + - Defer the release to a later date + - Override NO-GO with documented rationale (user must provide written justification) +- **Skip Phase 6 entirely** — do not tag, deploy to staging, deploy to production, or spawn community-manager. +- Produce a partial report summarizing Phases 1–5 and what was skipped (Phase 6) and why. +- Verdict: **BLOCKED** — release not deployed. 
+ +### Phase 6: Deployment (if GO) +Delegate to **release-manager** + **devops-engineer**: +- Tag the release in version control +- Generate changelog using `/changelog` +- Deploy to staging for final smoke test +- Deploy to production +- Monitor for 48 hours post-release + +Delegate to **community-manager** (in parallel with deployment): +- Finalize patch notes using `/patch-notes [version]` +- Prepare launch announcement (store page updates, social media, community post) +- Draft a known-issues post if any S3+ issues shipped +- Output: all player-facing release communication, ready to publish on deploy confirmation + +### Phase 7: Post-Release +- **release-manager**: Generate release report (what shipped, what was deferred, metrics) +- **producer**: Update milestone tracking, communicate to stakeholders +- **qa-lead**: Monitor incoming bug reports for regressions +- **community-manager**: Publish all player-facing communication, monitor community sentiment +- **analytics-engineer**: Confirm live dashboards are healthy; alert if any critical events are missing +- Schedule post-release retrospective if issues occurred + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked.
+ +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess + +## File Write Protocol + +All file writes (release checklists, changelogs, patch notes, deployment scripts) are +delegated to sub-agents and sub-skills. Each enforces the "May I write to [path]?" +protocol. This orchestrator does not write files directly. + +## Output + +A summary report covering: release version, scope, quality gate results, go/no-go decision, deployment status, and monitoring plan. + +Verdict: **COMPLETE** — release executed and deployed. +Verdict: **BLOCKED** — release halted; go/no-go was NO or a hard blocker is unresolved. + +## Next Steps + +- Monitor post-release dashboards for 48 hours. +- Run `/retrospective` if significant issues occurred during the release. +- Update `production/stage.txt` to `Live` after successful deployment. diff --git a/.omc/skills/team-ui/SKILL.md b/.omc/skills/team-ui/SKILL.md new file mode 100644 index 0000000..7f68b0f --- /dev/null +++ b/.omc/skills/team-ui/SKILL.md @@ -0,0 +1,170 @@ +--- +name: team-ui +description: "Orchestrate the UI team through the full UX pipeline: from UX spec authoring through visual design, implementation, review, and polish. Integrates with /ux-design, /ux-review, and studio UX templates." +argument-hint: "[UI feature description]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash, Task, AskUserQuestion, TodoWrite +--- +When this skill is invoked, orchestrate the UI team through a structured pipeline. + +**Decision Points:** At each phase transition, use `AskUserQuestion` to present +the user with the subagent's proposals as selectable options. 
Write the agent's +full analysis in conversation, then capture the decision with concise labels. +The user must approve before moving to the next phase. + +## Team Composition +- **ux-designer** — User flows, wireframes, accessibility, input handling +- **ui-programmer** — UI framework, screens, widgets, data binding, implementation +- **art-director** — Visual style, layout polish, consistency with art bible +- **engine UI specialist** — Validates UI implementation patterns against engine-specific best practices (read from `.claude/docs/technical-preferences.md` Engine Specialists → UI Specialist) +- **accessibility-specialist** — Audits accessibility compliance at Phase 4 + +**Templates used by this pipeline:** +- `ux-spec.md` — Standard screen/flow UX specification +- `hud-design.md` — HUD-specific UX specification +- `interaction-pattern-library.md` — Reusable interaction patterns +- `accessibility-requirements.md` — Committed accessibility tier and requirements + +## How to Delegate + +Use the Task tool to spawn each team member as a subagent: +- `subagent_type: ux-designer` — User flows, wireframes, accessibility, input handling +- `subagent_type: ui-programmer` — UI framework, screens, widgets, data binding +- `subagent_type: art-director` — Visual style, layout polish, art bible consistency +- `subagent_type: [UI engine specialist]` — Engine-specific UI pattern validation (e.g., unity-ui-specialist, ue-umg-specialist, godot-specialist) +- `subagent_type: accessibility-specialist` — Accessibility compliance audit + +Always provide full context in each agent's prompt (feature requirements, existing UI patterns, platform targets). Launch independent agents in parallel where the pipeline allows it (e.g., Phase 4 review agents can run simultaneously). 
+ +## Pipeline + +### Phase 1a: Context Gathering + +Before designing anything, read and synthesize: +- `design/gdd/game-concept.md` — platform targets and intended audience +- `design/player-journey.md` — player's state and context when they reach this screen +- All GDD UI Requirements sections relevant to this feature +- `design/ux/interaction-patterns.md` — existing patterns to reuse (not reinvent) +- `design/accessibility-requirements.md` — committed accessibility tier (e.g., Basic, Enhanced, Full) + +**If `design/ux/interaction-patterns.md` does not exist**, surface the gap immediately: +> "interaction-patterns.md does not exist — no existing patterns to reuse." + +Then use `AskUserQuestion` with options: +- (a) Run `/ux-design patterns` first to establish the pattern library, then continue +- (b) Proceed without the pattern library — ui-programmer will treat all patterns created as new and add each to a new `design/ux/interaction-patterns.md` at completion + +Do NOT invent or assume patterns from the feature name or GDD alone. If the user chooses (b), explicitly instruct ui-programmer in Phase 3 to treat all patterns as new and document them in `design/ux/interaction-patterns.md` when implementation is complete. Note the pattern library status (created / absent / updated) in the final summary report. + +Summarize the context in a brief for the ux-designer: what the player is doing, what they need, what constraints apply, and which existing patterns are relevant. + +### Phase 1b: UX Spec Authoring + +Invoke `/ux-design [feature name]` skill OR delegate directly to ux-designer to produce `design/ux/[feature-name].md` following the `ux-spec.md` template. + +If designing the HUD, use the `hud-design.md` template instead of `ux-spec.md`. + +> **Notes on special cases:** +> - For HUD design specifically, invoke `/ux-design` with `argument: hud` (e.g., `/ux-design hud`). 
+> - For the interaction pattern library, run `/ux-design patterns` once at project start and update it whenever new patterns are introduced during later phases. + +Output: `design/ux/[feature-name].md` with all required spec sections filled. + +### Phase 1c: UX Review + +After the spec is complete, invoke `/ux-review design/ux/[feature-name].md`. + +**Gate**: Do not proceed to Phase 2 until the verdict is APPROVED. If the verdict is NEEDS REVISION, the ux-designer must address the flagged issues and re-run the review. The user may explicitly accept a NEEDS REVISION risk and proceed, but this must be a conscious decision — present the specific concerns via `AskUserQuestion` before asking whether to proceed. + +### Phase 2: Visual Design + +Delegate to **art-director**: +- Review the full UX spec (flows, wireframes, interaction patterns, accessibility notes) — not just the wireframe images +- Apply visual treatment from the art bible: colors, typography, spacing, animation style +- Check that visual design preserves accessibility compliance: verify color contrast ratios, and confirm color is never the only indicator of state (shape, text, or icon must reinforce it) +- Specify all asset requirements needed from the art pipeline: icons at specified sizes, background textures, fonts, decorative elements — with precise dimensions and format requirements +- Ensure consistency with existing implemented UI screens +- Output: visual design spec with style notes and asset manifest + +### Phase 3: Implementation + +Before implementation begins, spawn the **engine UI specialist** (from `.claude/docs/technical-preferences.md` Engine Specialists → UI Specialist) to review the UX spec and visual design spec for engine-specific implementation guidance: +- Which engine UI framework should be used for this screen? 
(e.g., UI Toolkit vs UGUI in Unity, Control nodes vs CanvasLayer in Godot, UMG vs CommonUI in Unreal) +- Any engine-specific gotchas for the proposed layout or interaction patterns? +- Recommended widget/node structure for the engine? +- Output: engine UI implementation notes to hand off to ui-programmer before they begin + +If no engine is configured, skip this step. + +Delegate to **ui-programmer**: +- Implement the UI following the UX spec and visual design spec +- **Use patterns from `design/ux/interaction-patterns.md`** — do not reinvent patterns that are already specified. If a pattern almost fits but needs modification, note the deviation and flag it for ux-designer review. +- **UI NEVER owns or modifies game state** — display only; emit events for all player actions +- All text through the localization system — no hardcoded player-facing strings +- Support both input methods (keyboard/mouse AND gamepad) +- Implement accessibility features per the committed tier in `design/accessibility-requirements.md` +- Wire up data binding to game state +- **If any new interaction pattern is created during implementation** (i.e., something not already in the pattern library), add it to `design/ux/interaction-patterns.md` before marking implementation complete +- Output: implemented UI feature + +### Phase 4: Review (parallel) + +Delegate in parallel: +- **ux-designer**: Verify implementation matches wireframes and interaction spec. Test keyboard-only and gamepad-only navigation. Check accessibility features function correctly. +- **art-director**: Verify visual consistency with art bible. Check at minimum and maximum supported resolutions. +- **accessibility-specialist**: Verify compliance against the committed accessibility tier documented in `design/accessibility-requirements.md`. Flag any violations as blockers. + +All three review streams must report before proceeding to Phase 5. 
+ +### Phase 5: Polish + +- Address all review feedback +- Verify animations are skippable and respect the player's motion reduction preferences +- Confirm UI sounds trigger through the audio event system (no direct audio calls) +- Test at all supported resolutions and aspect ratios +- **Verify `design/ux/interaction-patterns.md` is up to date** — if any new patterns were introduced during this feature's implementation, confirm they have been added to the library +- **Confirm all HUD elements respect the visual budget** defined in `design/ux/hud.md` (element count, screen region allocations, maximum opacity values) + +## Quick Reference — When to Use Which Skill + +- `/ux-design` — Author a new UX spec for a screen, flow, or HUD from scratch +- `/ux-review` — Validate a completed UX spec before implementation +- `/team-ui [feature]` — Full pipeline from concept through polish (calls `/ux-design` and `/ux-review` internally) +- `/quick-design` — Small UI changes that don't need a full new UX spec + +## Error Recovery Protocol + +If any spawned agent (via Task) returns BLOCKED, errors, or cannot complete: + +1. **Surface immediately**: Report "[AgentName]: BLOCKED — [reason]" to the user before continuing to dependent phases +2. **Assess dependencies**: Check whether the blocked agent's output is required by subsequent phases. If yes, do not proceed past that dependency point without user input. +3. **Offer options** via AskUserQuestion with choices: + - Skip this agent and note the gap in the final report + - Retry with narrower scope + - Stop here and resolve the blocker first +4. **Always produce a partial report** — output whatever was completed. Never discard work because one agent blocked. 
+ +Common blockers: +- Input file missing (story not found, GDD absent) → redirect to the skill that creates it +- ADR status is Proposed → do not implement; run `/architecture-decision` first +- Scope too large → split into two stories via `/create-stories` +- Conflicting instructions between ADR and story → surface the conflict, do not guess + +## File Write Protocol + +All file writes (UX specs, interaction pattern library updates, implementation files) are +delegated to sub-agents and sub-skills (`/ux-design`, `ui-programmer`). Each enforces the +"May I write to [path]?" protocol. This orchestrator does not write files directly. + +## Output + +A summary report covering: UX spec status, UX review verdict, visual design status, implementation status, accessibility compliance, input method support, interaction pattern library update status, and any outstanding issues. + +Verdict: **COMPLETE** — UI feature delivered through full pipeline (UX spec → visual → implementation → review → polish). +Verdict: **BLOCKED** — pipeline halted; surface the blocker and its phase before stopping. + +## Next Steps + +- Run `/ux-review` on the final spec if not yet approved. +- Run `/code-review` on the UI implementation before closing stories. +- Run `/team-polish` if visual or audio polish pass is needed. diff --git a/.omc/skills/tech-debt/SKILL.md b/.omc/skills/tech-debt/SKILL.md new file mode 100644 index 0000000..1195626 --- /dev/null +++ b/.omc/skills/tech-debt/SKILL.md @@ -0,0 +1,121 @@ +--- +name: tech-debt +description: "Track, categorize, and prioritize technical debt across the codebase. Scans for debt indicators, maintains a debt register, and recommends repayment scheduling." 
+argument-hint: "[scan|add|prioritize|report]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +--- + +## Phase 1: Parse Subcommand + +Determine the mode from the argument: + +- `scan` — Scan the codebase for tech debt indicators +- `add` — Add a new tech debt entry manually +- `prioritize` — Re-prioritize the existing debt register +- `report` — Generate a summary report of current debt status + +If no subcommand is provided, output usage and stop. Verdict: **FAIL** — missing required subcommand. + +--- + +## Phase 2A: Scan Mode + +Search the codebase for debt indicators: + +- `TODO` comments (count and categorize) +- `FIXME` comments (these are bugs disguised as debt) +- `HACK` comments (workarounds that need proper solutions) +- `@deprecated` markers +- Duplicated code blocks (similar patterns in multiple files) +- Files over 500 lines (potential god objects) +- Functions over 50 lines (potential complexity) + +Categorize each finding: + +- **Architecture Debt**: Wrong abstractions, missing patterns, coupling issues +- **Code Quality Debt**: Duplication, complexity, naming, missing types +- **Test Debt**: Missing tests, flaky tests, untested edge cases +- **Documentation Debt**: Missing docs, outdated docs, undocumented APIs +- **Dependency Debt**: Outdated packages, deprecated APIs, version conflicts +- **Performance Debt**: Known slow paths, unoptimized queries, memory issues + +Present the findings to the user. + +Ask: "May I write these findings to `docs/tech-debt-register.md`?" + +If yes, update the register (append new entries, do not overwrite existing ones). Verdict: **COMPLETE** — scan findings written to register. + +If no, stop here. Verdict: **BLOCKED** — user declined write. + +--- + +## Phase 2B: Add Mode + +Prompt for: description, category, affected files, estimated fix effort, impact if left unfixed. + +Present the new entry to the user. + +Ask: "May I append this entry to `docs/tech-debt-register.md`?" + +If yes, append the entry. 
Verdict: **COMPLETE** — entry added to register. + +If no, stop here. Verdict: **BLOCKED** — user declined write. + +--- + +## Phase 2C: Prioritize Mode + +Read the debt register at `docs/tech-debt-register.md`. + +Score each item by: `(impact_if_unfixed × frequency_of_encounter) / fix_effort` + +Re-sort the register by priority score and recommend which items to include in the next sprint. + +Present the re-prioritized register to the user. + +Ask: "May I write the re-prioritized register back to `docs/tech-debt-register.md`?" + +If yes, write the updated file. Verdict: **COMPLETE** — register re-prioritized and saved. + +If no, stop here. Verdict: **BLOCKED** — user declined write. + +--- + +## Phase 2D: Report Mode + +Read the debt register. Generate summary statistics: + +- Total items by category +- Total estimated fix effort +- Items added vs resolved since last report +- Trending direction (growing / stable / shrinking) + +Flag any items that have been in the register for more than 3 sprints. + +Output the report to the user. This mode is read-only — no files are written. Verdict: **COMPLETE** — debt report generated. + +--- + +## Phase 3: Next Steps + +- Run `/sprint-plan` to schedule high-priority debt items into the next sprint. +- Run `/tech-debt report` at the start of each sprint to track debt trends over time. + +### Debt Register Format + +```markdown +## Technical Debt Register +Last updated: [Date] +Total items: [N] | Estimated total effort: [T-shirt sizes summed] + +| ID | Category | Description | Files | Effort | Impact | Priority | Added | Sprint | +|----|----------|-------------|-------|--------|--------|----------|-------|--------| +| TD-001 | [Cat] | [Description] | [files] | [S/M/L/XL] | [Low/Med/High/Critical] | [Score] | [Date] | [Sprint to fix or "Backlog"] | +``` + +### Rules +- Tech debt is not inherently bad — it is a tool. The register tracks conscious decisions. 
+- Every debt entry must explain WHY it was accepted (deadline, prototype, missing info) +- "Scan" should run at least once per sprint to catch new debt +- Items older than 3 sprints without action should either be fixed or consciously accepted with a documented reason diff --git a/.omc/skills/test-evidence-review/SKILL.md b/.omc/skills/test-evidence-review/SKILL.md new file mode 100644 index 0000000..afa7dff --- /dev/null +++ b/.omc/skills/test-evidence-review/SKILL.md @@ -0,0 +1,250 @@ +--- +name: test-evidence-review +description: "Quality review of test files and manual evidence documents. Goes beyond existence checks — evaluates assertion coverage, edge case handling, naming conventions, and evidence completeness. Produces ADEQUATE/INCOMPLETE/MISSING verdict per story. Run before QA sign-off or on demand." +argument-hint: "[story-path | sprint | system-name]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +--- + +# Test Evidence Review + +`/smoke-check` verifies that test files **exist** and **pass**. This skill +goes further — it reviews the **quality** of those tests and evidence documents. +A test file that exists and passes may still leave critical behaviour uncovered. +A manual evidence doc that exists may lack the sign-offs required for closure. + +**Output:** Summary report (in conversation) + optional `production/qa/evidence-review-[date].md` + +**When to run:** +- Before QA hand-off sign-off (`/team-qa` Phase 5) +- On any story where test quality is in question +- As part of milestone review for Logic and Integration story quality audit + +--- + +## 1. Parse Arguments + +**Modes:** +- `/test-evidence-review [story-path]` — review a single story's evidence +- `/test-evidence-review sprint` — review all stories in the current sprint +- `/test-evidence-review [system-name]` — review all stories in an epic/system +- No argument — ask which scope: "Single story", "Current sprint", "A system" + +--- + +## 2. 
Load Stories in Scope + +Based on the argument: + +**Single story**: Read the story file directly. Extract: Story Type, Test +Evidence section, story slug, system name. + +**Sprint**: Read the most recently modified file in `production/sprints/`. +Extract the list of story file paths from the sprint plan. Read each story file. + +**System**: Glob `production/epics/[system-name]/story-*.md`. Read each. + +For each story, collect: +- `Type:` field (Logic / Integration / Visual/Feel / UI / Config/Data) +- `## Test Evidence` section — the stated expected test file path or evidence doc +- Story slug (from file name) +- System name (from directory path) +- Acceptance Criteria list (all checkbox items) + +--- + +## 3. Locate Evidence Files + +For each story, find the evidence: + +**Logic stories**: Glob `tests/unit/[system]/[story-slug]_test.*` + - If not found, also try: Grep in `tests/unit/[system]/` for files + containing the story slug + +**Integration stories**: Glob `tests/integration/[system]/[story-slug]_test.*` + - Also check `production/session-logs/` for playtest records mentioning the story + +**Visual/Feel and UI stories**: Glob `production/qa/evidence/[story-slug]-evidence.*` + +**Config/Data stories**: Glob `production/qa/smoke-*.md` (any smoke check report) + +Note what was found (path) or not found (gap) for each story. + +--- + +## 4. Review Automated Test Quality (Logic / Integration) + +For each test file found, read it and evaluate: + +### Assertion coverage + +Count the number of distinct assertions (lines containing assert, expect, +check, verify, or engine-specific assertion patterns). Low assertion count is +a quality signal — a test that makes only 1 assertion per test function may +not cover the range of expected behaviour. 
+ +Thresholds: +- **3+ assertions per test function** → normal +- **1-2 assertions per test function** → note as potentially thin +- **0 assertions** (test exists but no asserts) → flag as BLOCKING — the + test passes vacuously and proves nothing + +### Edge case coverage + +For each acceptance criterion in the story that contains a number, threshold, +or "when X happens" conditional: check whether a test function name or +test body references that specific case. + +Heuristics: +- Grep test file for "zero", "max", "null", "empty", "min", "invalid", + "boundary", "edge" — presence of any is a positive signal +- If the story has a Formulas section with specific bounds: check whether + tests exercise at minimum/maximum values + +### Naming quality + +Test function names should describe: the scenario + the expected result. +Pattern: `test_[scenario]_[expected_outcome]` + +Flag functions named generically (`test_1`, `test_run`, `testBasic`) as +**naming issues** — they make failures harder to diagnose. + +### Formula traceability + +For Logic stories where the GDD has a Formulas section: check that the test +file contains at least one test whose name or comment references the formula +name or a formula value. A test that exercises a formula without mentioning +it by name is harder to maintain when the formula changes. + +--- + +## 5. Review Manual Evidence Quality (Visual/Feel / UI) + +For each evidence document found, read it and evaluate: + +### Criterion linkage + +The evidence doc should reference each acceptance criterion from the story. +Check: does the evidence doc contain each criterion (or a clear rephrasing)? +Missing criteria mean a criterion was never verified. + +### Sign-off completeness + +Check for three sign-off lines (or equivalent fields): +- Developer sign-off +- Designer / art-lead sign-off (for Visual/Feel) +- QA lead sign-off + +If any are missing or blank: flag as INCOMPLETE — the story cannot be fully +closed without all required sign-offs. 
+ +### Screenshot / artefact completeness + +For Visual/Feel stories: check whether screenshot file paths are referenced +in the evidence doc. If referenced, Glob for them to confirm they exist. + +For UI stories: check whether a walkthrough sequence (step-by-step interaction +log) is present. + +### Date coverage + +Evidence doc should have a date. If the date is earlier than the story's +last major change (heuristic: compare against sprint start date from the sprint +plan), flag as POTENTIALLY STALE — the evidence may not cover the final +implementation. + +--- + +## 6. Build the Review Report + +For each story, assign a verdict: + +| Verdict | Meaning | +|---------|---------| +| **ADEQUATE** | Test/evidence exists, passes quality checks, all criteria covered | +| **INCOMPLETE** | Test/evidence exists but has quality gaps (thin assertions, missing sign-offs) | +| **MISSING** | No test or evidence found for a story type that requires it | + +The overall sprint/system verdict is the worst story verdict present. 
+ +```markdown +## Test Evidence Review + +> **Date**: [date] +> **Scope**: [single story path | Sprint [N] | [system name]] +> **Stories reviewed**: [N] +> **Overall verdict**: ADEQUATE / INCOMPLETE / MISSING + +--- + +### Story-by-Story Results + +#### [Story Title] — [Type] — [ADEQUATE/INCOMPLETE/MISSING] + +**Test/evidence path**: `[path]` (found) / (not found) + +**Automated test quality** *(Logic/Integration only)*: +- Assertion coverage: [N per function on average] — [adequate / thin / none] +- Edge cases: [covered / partial / not found] +- Naming: [consistent / [N] generic names flagged] +- Formula traceability: [yes / no — formula names not referenced in tests] + +**Manual evidence quality** *(Visual/Feel/UI only)*: +- Criterion linkage: [N/M criteria referenced] +- Sign-offs: [Developer ✓ | Designer ✗ | QA Lead ✗] +- Artefacts: [screenshots present / missing / N/A] +- Freshness: [dated [date] — current / potentially stale] + +**Issues**: +- BLOCKING: [description] *(prevents story-done)* +- ADVISORY: [description] *(should fix before release)* + +--- + +### Summary + +| Story | Type | Verdict | Issues | +|-------|------|---------|--------| +| [title] | Logic | ADEQUATE | None | +| [title] | Integration | INCOMPLETE | Thin assertions (avg 1.2/function) | +| [title] | Visual/Feel | INCOMPLETE | QA lead sign-off missing | +| [title] | Logic | MISSING | No test file found | + +**BLOCKING items** (must resolve before story can be closed): [N] +**ADVISORY items** (should address before release): [N] +``` + +--- + +## 7. Write Output (Optional) + +Present the report in conversation. + +Ask: "May I write this test evidence review to +`production/qa/evidence-review-[date].md`?" + +This is optional — the report is useful standalone. Write only if the user +wants a persistent record. + +After the report: + +- For BLOCKING items: "These must be resolved before `/story-done` can mark the + story Complete. Would you like to address any of them now?" 
+- For thin assertions: "Consider running `/test-helpers [system]` to see + scaffolded assertion patterns for common cases." +- For missing sign-offs: "Manual sign-off is required from [role]. Share + `[evidence-path]` with them to complete sign-off." + +Verdict: **COMPLETE** — evidence review finished. Use CONCERNS if BLOCKING items were found. + +--- + +## Collaborative Protocol + +- **Report quality issues, do not fix them** — this skill reads and evaluates; + it does not modify test files or evidence documents +- **ADEQUATE means adequate for shipping, not perfect** — avoid nitpicking + tests that are functioning and comprehensive enough to give confidence +- **BLOCKING vs. ADVISORY distinction is important** — only flag BLOCKING when + the gap leaves a story criterion genuinely unverified +- **Ask before writing** — the report file is optional; always confirm before writing diff --git a/.omc/skills/test-flakiness/SKILL.md b/.omc/skills/test-flakiness/SKILL.md new file mode 100644 index 0000000..c2427af --- /dev/null +++ b/.omc/skills/test-flakiness/SKILL.md @@ -0,0 +1,210 @@ +--- +name: test-flakiness +description: "Detect non-deterministic (flaky) tests by reading CI run logs or test result history. Aggregates pass rates per test, identifies intermittent failures, recommends quarantine or fix, and maintains a flaky test registry. Best run during Polish phase or after multiple CI runs." +argument-hint: "[ci-log-path | scan | registry]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, Bash +--- + +# Test Flakiness Detection + +A flaky test is one that sometimes passes and sometimes fails without any code +change. Flaky tests are worse than no tests in some ways — they train the team +to ignore red CI runs, masking genuine failures. This skill identifies them, +explains likely causes, and recommends whether to quarantine or fix each one. 
+ +**Output:** Updated `tests/regression-suite.md` quarantine section + optional +`production/qa/flakiness-report-[date].md` + +**When to run:** +- Polish phase (tests have had many runs; statistical signal is reliable) +- When developers start dismissing CI failures as "probably flaky" +- After `/regression-suite` identifies quarantined tests that need diagnosis + +--- + +## 1. Parse Arguments + +**Modes:** +- `/test-flakiness [ci-log-path]` — analyse a specific CI run log file +- `/test-flakiness scan` — scan all available CI logs in `.github/` or + standard log output directories +- `/test-flakiness registry` — read existing regression-suite.md quarantine + section and provide remediation guidance for already-known flaky tests +- No argument — auto-detect: run `scan` if CI logs are accessible, else + `registry` + +--- + +## 2. Locate CI Log Data + +### Option A — GitHub Actions (preferred) + +Check for test result artifacts: +```bash +ls -t .github/ 2>/dev/null +ls -t test-results/ 2>/dev/null +``` + +For Godot projects: GdUnit4 outputs XML results compatible with JUnit format. +Check `test-results/` for `.xml` files. + +For Unity projects: game-ci test runner outputs NUnit XML to `test-results/` +by default. + +For Unreal projects: automation logs go to `Saved/Logs/`. Grep for +`Result: Success` and `Result: Fail` patterns. + +### Option B — Local log files + +If a path argument is provided, read that file directly. + +### Option C — No log data available + +If no logs found: +> "No CI log data found. To detect flaky tests, this skill needs test result +> history from multiple runs. Options: +> 1. Run the test suite at least 3 times and collect the output logs +> 2. Check CI pipeline output and save a log to `test-results/` +> 3. Run `/test-flakiness registry` to review tests already flagged as flaky +> in `tests/regression-suite.md`" + +Stop and ask the user which option to pursue. + +--- + +## 3. 
Parse Test Results
+
+For each CI log or result file found, parse:
+
+**JUnit XML format** (GdUnit4 / Unity):
+- Grep for `<testcase` elements; a nested `<failure>` or `<error>` element
+  marks that test as failed in that run
+- Record, per test name: total runs, pass count, fail count
+
+**Unreal log format**: grep `Saved/Logs/` for `Result: Success` /
+`Result: Fail` lines and the test name preceding each.
+
+Aggregate results per test name across all runs to compute a fail rate.
+
+---
+
+## 4. Classify Flakiness
+
+A test that both passes and fails across runs — with no code change in
+between — is flaky. Classify severity by fail rate:
+
+- **High flakiness**: Fails in >25% of runs — quarantine immediately
+- **Moderate flakiness**: Fails in 5–25% of runs — investigate and fix soon
+- **Low/suspected flakiness**: Fails in 1–5% of runs — monitor; may be
+  genuinely rare failure
+
+For each flaky test, classify the likely cause:
+
+### Cause classification
+
+| Cause | Symptoms | Fix direction |
+|-------|----------|---------------|
+| **Timing / async** | Fails after awaiting signals or timers; pass rate correlates with system load | Add explicit await/synchronisation; avoid time-based delays |
+| **Order dependency** | Fails when run after specific other tests; passes in isolation | Add proper setup/teardown; ensure test isolation |
+| **Random seed** | Fails intermittently with no pattern; involves RNG | Pass explicit seed; don't use `randf()` in tests |
+| **Resource leak** | Fails more often later in a test run | Fix cleanup in teardown; check orphan nodes (Godot) or object disposal (Unity) |
+| **External state** | Fails when a file, scene, or global exists from a prior test | Isolate test from file system; use in-memory mocks |
+| **Floating point** | Fails on comparisons like `== 0.5` | Use epsilon comparison (`is_equal_approx`, `Assert.AreApproximately`) |
+| **Scene/prefab load race** | Fails when scenes are not yet ready | Await one frame after instantiation; use `await get_tree().process_frame` |
+
+Use Grep to check the test file for timing calls, randf, global state access,
+or equality comparisons on floats to narrow down the cause.
+
+---
+
+## 5. Recommend Action
+
+For each flaky test:
+
+**Quarantine (High flakiness):**
+> "Quarantine this test immediately. Disable it in CI by adding
+> `@pytest.mark.skip` / `[Ignore]` / `GdUnitSkip` annotation. Log it in
+> `tests/regression-suite.md` quarantine section. The test is now opt-in only.
+> Fix the root cause before removing quarantine."
+ +**Investigate and fix soon (Moderate):** +> "This test is intermittently unreliable. Root cause appears to be [cause]. +> Suggested fix: [specific fix based on cause classification]. Do not quarantine +> yet — fix the test directly." + +**Monitor (Low/suspected):** +> "This test shows suspected flakiness. Collect more run data before +> quarantining. Note it as 'suspected' in the regression suite." + +--- + +## 6. Generate Reports + +### In-conversation summary + +``` +## Flakiness Detection Results + +**Runs analysed**: [N] +**Tests tracked**: [N] + +### Flaky Tests Found + +| Test | System | Fail Rate | Likely Cause | Recommendation | +|------|--------|-----------|--------------|----------------| +| [test_name] | [system] | [N]% | Timing | Quarantine + fix async | +| [test_name] | [system] | [N]% | Float comparison | Fix: use epsilon compare | +| [test_name] | [system] | [N]% | Order dependency | Investigate teardown | + +### Clean Tests (no flakiness detected) + +[N] tests ran across [N] runs with consistent results — no flakiness detected. + +### Data Limitations + +[Note if fewer than 5 runs were available — fewer runs = less statistical confidence] +``` + +--- + +## 7. Update Regression Suite + Optional Report File + +Ask: "May I update the quarantine section of `tests/regression-suite.md` +with the flaky tests found?" + +If yes: use `Edit` to append entries to the Quarantined Tests table. +Never remove existing quarantine entries — only add new ones. + +Ask (separately): "May I write a full flakiness report to +`production/qa/flakiness-report-[date].md`?" + +The full report includes per-test analysis with cause details and +engine-specific fix snippets. + +After writing: + +- For each quarantined test: "Add the engine-specific skip annotation to + disable this test in CI. Re-enable after the root cause is fixed." +- For fix-eligible tests: "The fix for [test] is straightforward — + change the equality comparison on line [N] to use `is_equal_approx`." 
+- Summary: "Once all quarantine annotations are applied, CI should run green. + Schedule fix work for the [N] quarantined tests before the release gate." + +--- + +## Collaborative Protocol + +- **Never delete test files** — quarantine means annotate + list, not remove +- **Statistical confidence matters** — with < 3 runs, flag findings as + "suspected" not "confirmed"; ask if more run data is available +- **Fix is always the goal** — quarantine is temporary; surface the fix + direction even when recommending quarantine +- **Ask before writing** — both the regression-suite update and the report + file require explicit approval. On write: Verdict: **COMPLETE** — flakiness report written. On decline: Verdict: **BLOCKED** — user declined write. +- **Flakiness in CI is a team problem** — surface the list and recommended + actions clearly; do not just silently quarantine without the team knowing diff --git a/.omc/skills/test-helpers/SKILL.md b/.omc/skills/test-helpers/SKILL.md new file mode 100644 index 0000000..a7e10b1 --- /dev/null +++ b/.omc/skills/test-helpers/SKILL.md @@ -0,0 +1,394 @@ +--- +name: test-helpers +description: "Generate engine-specific test helper libraries for the project's test suite. Reads existing test patterns and produces tests/helpers/ with assertion utilities, factory functions, and mock objects tailored to the project's systems. Reduces boilerplate in new test files." +argument-hint: "[system-name | all | scaffold]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write +--- + +# Test Helpers + +Writing test cases is faster and more consistent when common setup, teardown, +and assertion patterns are abstracted into helpers. This skill generates a +`tests/helpers/` library tailored to the project's actual engine, language, +and systems — so every developer writes less boilerplate and more assertions. 
+ +**Output:** `tests/helpers/` directory with engine-specific helper files + +**When to run:** +- After `/test-setup` scaffolds the framework (first time) +- When multiple test files repeat the same setup boilerplate +- When starting to write tests for a new system + +--- + +## 1. Parse Arguments + +**Modes:** +- `/test-helpers [system-name]` — generate helpers for a specific system + (e.g., `/test-helpers combat`) +- `/test-helpers all` — generate helpers for all systems with test files +- `/test-helpers scaffold` — generate only the base helper library (no + system-specific helpers); use this on first run +- No argument — run `scaffold` if no helpers exist, else `all` + +--- + +## 2. Detect Engine and Language + +Read `.claude/docs/technical-preferences.md` and extract: +- `Engine:` value +- `Language:` value +- `Framework:` from the Testing section + +If engine is not configured: "Engine not configured. Run `/setup-engine` first." + +--- + +## 3. Load Existing Test Patterns + +Scan the test directory for patterns already in use: + +``` +Glob pattern="tests/**/*_test.*" (all test files) +``` + +For a representative sample (up to 5 files), read the test files and extract: +- Setup patterns (how `before_each` / `setUp` / fixtures are written) +- Common assertion patterns (what is being asserted most often) +- Object creation patterns (how game objects or scenes are instantiated in tests) +- Mock/stub patterns (how dependencies are replaced) + +This ensures generated helpers match the project's existing style, not a +generic template. + +Also read: +- `design/gdd/systems-index.md` — to know which systems exist +- In-scope GDD(s) — to understand what data types and values need testing +- `docs/architecture/tr-registry.yaml` — to map requirements to tested systems + +--- + +## 4. 
Generate Engine-Specific Helpers + +### Godot 4 (GDUnit4 / GDScript) + +**Base helper** (`tests/helpers/game_assertions.gd`): + +```gdscript +## Game-specific assertion utilities for [Project Name] tests. +## Extends GdUnitAssertions with domain-specific helpers. +## +## Usage: +## var assert = GameAssertions.new() +## assert.health_in_range(entity, 0, entity.max_health) + +class_name GameAssertions +extends RefCounted + +## Assert a value is within the inclusive range [min_val, max_val]. +## Use for any formula output that has defined bounds in a GDD. +static func assert_in_range( + value: float, + min_val: float, + max_val: float, + label: String = "value" +) -> void: + assert( + value >= min_val and value <= max_val, + "%s %.2f is outside expected range [%.2f, %.2f]" % [label, value, min_val, max_val] + ) + +## Assert a signal was emitted during a callable block. +## Usage: assert_signal_emitted(entity, "health_changed", func(): entity.take_damage(10)) +static func assert_signal_emitted( + obj: Object, + signal_name: String, + action: Callable +) -> void: + var emitted := false + obj.connect(signal_name, func(_args): emitted = true) + action.call() + assert(emitted, "Expected signal '%s' to be emitted, but it was not." % signal_name) + +## Assert that a callable does NOT emit a signal. +static func assert_signal_not_emitted( + obj: Object, + signal_name: String, + action: Callable +) -> void: + var emitted := false + obj.connect(signal_name, func(_args): emitted = true) + action.call() + assert(not emitted, "Expected signal '%s' NOT to be emitted, but it was." % signal_name) + +## Assert a node exists at path within a parent. +static func assert_node_exists(parent: Node, path: NodePath) -> void: + assert( + parent.has_node(path), + "Expected node at path '%s' to exist." % str(path) + ) +``` + +**Factory helper** (`tests/helpers/game_factory.gd`): + +```gdscript +## Factory functions for creating test game objects. 
+## Returns minimal objects configured for unit testing (no scene tree required). +## +## Usage: var player = GameFactory.make_player(health: 100) + +class_name GameFactory +extends RefCounted + +## Create a minimal player-like object for testing. +## Override fields as needed. +static func make_player(health: int = 100) -> Node: + var player = Node.new() + player.set_meta("health", health) + player.set_meta("max_health", health) + return player +``` + +**Scene helper** (`tests/helpers/scene_runner_helper.gd`): + +```gdscript +## Utilities for scene-based integration tests. +## Wraps GdUnitSceneRunner for common patterns. + +class_name SceneRunnerHelper +extends GdUnitTestSuite + +## Load a scene and wait one frame for _ready() to complete. +func load_scene_and_wait(scene_path: String) -> Node: + var scene = load(scene_path).instantiate() + add_child(scene) + await get_tree().process_frame + return scene +``` + +--- + +### Unity (NUnit / C#) + +**Base helper** (`tests/helpers/GameAssertions.cs`): + +```csharp +using NUnit.Framework; +using UnityEngine; + +/// +/// Game-specific assertion utilities for [Project Name] tests. +/// Extends NUnit's Assert with domain-specific helpers. +/// +public static class GameAssertions +{ + /// + /// Assert a value is within an inclusive range [min, max]. + /// Use for any formula output defined in GDD Formulas sections. + /// + public static void AssertInRange(float value, float min, float max, string label = "value") + { + Assert.That(value, Is.InRange(min, max), + $"{label} ({value:F2}) is outside expected range [{min:F2}, {max:F2}]"); + } + + /// Assert a UnityEvent or C# event was raised during an action. + public static void AssertEventRaised(ref bool wasCalled, System.Action action, string eventName) + { + wasCalled = false; + action(); + Assert.IsTrue(wasCalled, $"Expected event '{eventName}' to be raised, but it was not."); + } + + /// Assert a component exists on a GameObject. 
+    public static void AssertHasComponent<T>(GameObject obj) where T : Component
+    {
+        var component = obj.GetComponent<T>();
+        Assert.IsNotNull(component,
+            $"Expected GameObject '{obj.name}' to have component {typeof(T).Name}.");
+    }
+}
+```
+
+**Factory helper** (`tests/helpers/GameFactory.cs`):
+
+```csharp
+using UnityEngine;
+
+/// <summary>
+/// Factory methods for creating minimal test objects without loading scenes.
+/// </summary>
+public static class GameFactory
+{
+    /// Create a minimal GameObject with a named component for testing.
+    public static GameObject MakeGameObject(string name = "TestObject")
+    {
+        var go = new GameObject(name);
+        return go;
+    }
+
+    /// <summary>
+    /// Create a ScriptableObject of type T for data-driven tests.
+    /// Dispose with Object.DestroyImmediate after test.
+    /// </summary>
+    public static T MakeScriptableObject<T>() where T : ScriptableObject
+    {
+        return ScriptableObject.CreateInstance<T>();
+    }
+}
+```
+
+---
+
+### Unreal Engine (C++)
+
+**Base helper** (`tests/helpers/GameTestHelpers.h`):
+
+```cpp
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Misc/AutomationTest.h"
+
+/**
+ * Game-specific assertion macros and helpers for [Project Name] automation tests.
+ * Include in any test file that needs domain-specific assertions.
+ * + * Usage: + * GAME_TEST_ASSERT_IN_RANGE(TestName, DamageValue, 10.0f, 50.0f, TEXT("Damage")); + */ + +// Assert a float value is within inclusive range [Min, Max] +#define GAME_TEST_ASSERT_IN_RANGE(TestName, Value, Min, Max, Label) \ + TestTrue( \ + FString::Printf(TEXT("%s (%.2f) in range [%.2f, %.2f]"), Label, Value, Min, Max), \ + (Value) >= (Min) && (Value) <= (Max) \ + ) + +// Assert a UObject pointer is valid (not null, not garbage collected) +#define GAME_TEST_ASSERT_VALID(TestName, Ptr, Label) \ + TestTrue( \ + FString::Printf(TEXT("%s is valid"), Label), \ + IsValid(Ptr) \ + ) + +// Assert an Actor is in the world (spawned successfully) +#define GAME_TEST_ASSERT_SPAWNED(TestName, ActorPtr, ClassName) \ + TestNotNull( \ + FString::Printf(TEXT("Spawned actor of class %s"), TEXT(#ClassName)), \ + ActorPtr \ + ) + +/** + * Helper to create a minimal test world. + * Remember to call World->DestroyWorld(false) in teardown. + */ +namespace GameTestHelpers +{ + inline UWorld* CreateTestWorld(const FString& WorldName = TEXT("TestWorld")) + { + UWorld* World = UWorld::CreateWorld(EWorldType::Game, false); + FWorldContext& WorldContext = GEngine->CreateNewWorldContext(EWorldType::Game); + WorldContext.SetCurrentWorld(World); + return World; + } +} +``` + +--- + +## 5. Generate System-Specific Helpers + +For `[system-name]` or `all` modes, generate a helper per system: + +Read the system's GDD to extract: +- Data types (entity types, component names) +- Formula variables and their bounds +- Common test scenarios mentioned in Edge Cases + +Generate `tests/helpers/[system]_factory.[ext]` with factory functions +specific to that system's objects. + +Example pattern for a `combat` system (Godot/GDScript): + +```gdscript +## Factory and assertion helpers for Combat system tests. +## Generated by /test-helpers combat on [date]. 
+## Based on: design/gdd/combat.md + +class_name CombatTestFactory +extends RefCounted + +const DAMAGE_MIN := 0 +const DAMAGE_MAX := 999 # From GDD: damage formula upper bound + +## Create a minimal attacker object for damage formula tests. +static func make_attacker(attack: float = 10.0, crit_chance: float = 0.0) -> Node: + var attacker = Node.new() + attacker.set_meta("attack", attack) + attacker.set_meta("crit_chance", crit_chance) + return attacker + +## Create a minimal target object for damage receive tests. +static func make_target(defense: float = 0.0, health: float = 100.0) -> Node: + var target = Node.new() + target.set_meta("defense", defense) + target.set_meta("health", health) + target.set_meta("max_health", health) + return target + +## Assert damage output is within GDD-specified bounds. +static func assert_damage_in_bounds(damage: float) -> void: + GameAssertions.assert_in_range(damage, DAMAGE_MIN, DAMAGE_MAX, "damage") +``` + +--- + +## 6. Write Output + +Present a summary of what will be created: + +``` +## Test Helpers to Create + +Base helpers (engine: [engine]): +- tests/helpers/game_assertions.[ext] +- tests/helpers/game_factory.[ext] +[engine-specific extras] + +System helpers ([mode]): +- tests/helpers/[system]_factory.[ext] ← from [system] GDD +``` + +Ask: "May I write these helper files to `tests/helpers/`?" + +**Never overwrite existing files.** If a file already exists, report: +"Skipping `[path]` — already exists. Remove the file manually if you want it +regenerated." + +After writing: Verdict: **COMPLETE** — helper files created. + +"Helper files created. To use them in a test: +- Godot: `class_name` is auto-imported — no explicit import needed +- Unity: Add `using` directive or reference the test assembly +- Unreal: `#include \"tests/helpers/GameTestHelpers.h\"`" + +--- + +## Collaborative Protocol + +- **Never overwrite existing helpers** — they may contain hand-written + customisations. 
Only generate new files that don't exist yet +- **Generated code is a starting point** — the generated factory functions use + metadata patterns for simplicity; adapt to the actual class structure once + the code exists +- **Helpers should reflect the GDD** — bounds and constants in helpers should + trace to GDD Formulas sections, not invented values +- **Ask before writing** — always confirm before creating files in `tests/` + +## Next Steps + +- Run `/test-setup` if the test framework has not been scaffolded yet. +- Use `/dev-story` to implement stories — helpers reduce boilerplate in new test files. +- Run `/skill-test` to validate other skills that may need helper coverage. diff --git a/.omc/skills/test-setup/SKILL.md b/.omc/skills/test-setup/SKILL.md new file mode 100644 index 0000000..a1b193d --- /dev/null +++ b/.omc/skills/test-setup/SKILL.md @@ -0,0 +1,425 @@ +--- +name: test-setup +description: "Scaffold the test framework and CI/CD pipeline for the project's engine. Creates the tests/ directory structure, engine-specific test runner configuration, and GitHub Actions workflow. Run once during Technical Setup phase before the first sprint begins." +argument-hint: "[force]" +user-invocable: true +allowed-tools: Read, Glob, Grep, Bash, Write +--- + +# Test Setup + +This skill scaffolds the automated testing infrastructure for the project. +It detects the configured engine, generates the appropriate test runner +configuration, creates the standard directory layout, and wires up CI/CD +so tests run on every push. + +Run this once during the Technical Setup phase, before any implementation +begins. A test framework installed at sprint start costs 30 minutes. +A test framework installed at sprint four costs 3 sprints. + +**Output:** `tests/` directory structure + `.github/workflows/tests.yml` + +--- + +## Phase 1: Detect Engine and Existing State + +1. **Read engine config**: + - Read `.claude/docs/technical-preferences.md` and extract the `Engine:` value. 
+ - If engine is not configured (`[TO BE CONFIGURED]`), stop: + "Engine not configured. Run `/setup-engine` first, then re-run `/test-setup`." + +2. **Check for existing test infrastructure**: + - Glob `tests/` — does the directory exist? + - Glob `tests/unit/` and `tests/integration/` — do subdirectories exist? + - Glob `.github/workflows/` — does a CI workflow file exist? + - Glob `tests/gdunit4_runner.gd` (Godot) or `tests/EditMode/` (Unity) or + `Source/Tests/` (Unreal) for engine-specific artifacts. + +3. **Report findings**: + - "Engine: [engine]. Test directory: [found / not found]. CI workflow: [found / not found]." + - If everything already exists AND `force` argument was not passed: + "Test infrastructure appears to be in place. Re-run with `/test-setup force` + to regenerate. Proceeding will not overwrite existing test files." + +If the `force` argument is passed, skip the "already exists" early-exit and +proceed — but still do not overwrite files that already exist at a given path. +Only create files that are missing. + +--- + +## Phase 2: Present Plan + +Based on the engine detected and the existing state, present a plan: + +``` +## Test Setup Plan — [Engine] + +I will create the following (skipping any that already exist): + +tests/ + unit/ — Isolated unit tests for formulas, state, and logic + integration/ — Cross-system tests and save/load round-trips + smoke/ — Critical path test list (15-minute manual gate) + evidence/ — Screenshot and manual test sign-off records + README.md — Test framework documentation + +[Engine-specific files — see per-engine details below] + +.github/workflows/tests.yml — CI: run tests on every push to main + +Estimated time: ~5 minutes to create all files. +``` + +Ask: "May I create these files? I will not overwrite any test files that +already exist at these paths." + +Do not proceed without approval. 
+ +--- + +## Phase 3: Create Directory Structure + +After approval, create the following files: + +### `tests/README.md` + +```markdown +# Test Infrastructure + +**Engine**: [engine name + version] +**Test Framework**: [GdUnit4 | Unity Test Framework | UE Automation] +**CI**: `.github/workflows/tests.yml` +**Setup date**: [date] + +## Directory Layout + +``` +tests/ + unit/ # Isolated unit tests (formulas, state machines, logic) + integration/ # Cross-system and save/load tests + smoke/ # Critical path test list for /smoke-check gate + evidence/ # Screenshot logs and manual test sign-off records +``` + +## Running Tests + +[Engine-specific command — see below] + +## Test Naming + +- **Files**: `[system]_[feature]_test.[ext]` +- **Functions**: `test_[scenario]_[expected]` +- **Example**: `combat_damage_test.gd` → `test_base_attack_returns_expected_damage()` + +## Story Type → Test Evidence + +| Story Type | Required Evidence | Location | +|---|---|---| +| Logic | Automated unit test — must pass | `tests/unit/[system]/` | +| Integration | Integration test OR playtest doc | `tests/integration/[system]/` | +| Visual/Feel | Screenshot + lead sign-off | `tests/evidence/` | +| UI | Manual walkthrough OR interaction test | `tests/evidence/` | +| Config/Data | Smoke check pass | `production/qa/smoke-*.md` | + +## CI + +Tests run automatically on every push to `main` and on every pull request. +A failed test suite blocks merging. +``` +``` + +### Engine-specific files + +#### Godot 4 (`Engine: Godot`) + +Create `tests/gdunit4_runner.gd`: + +```gdscript +# GdUnit4 test runner — invoked by CI and /smoke-check +# Usage: godot --headless --script tests/gdunit4_runner.gd +extends SceneTree + +func _init() -> void: + var runner := load("res://addons/gdunit4/GdUnitRunner.gd") + if runner == null: + push_error("GdUnit4 not found. 
Install via AssetLib or addons/.") + quit(1) + return + var instance = runner.new() + instance.run_tests() + quit(0) +``` + +Create `tests/unit/.gdignore_placeholder` with content: +`# Unit tests go here — one subdirectory per system (e.g., tests/unit/combat/)` + +Create `tests/integration/.gdignore_placeholder` with content: +`# Integration tests go here — one subdirectory per system` + +Note in the README: **Installing GdUnit4** +``` +1. Open Godot → AssetLib → search "GdUnit4" → Download & Install +2. Enable the plugin: Project → Project Settings → Plugins → GdUnit4 ✓ +3. Restart the editor +4. Verify: res://addons/gdunit4/ exists +``` + +#### Unity (`Engine: Unity`) + +Create `tests/EditMode/` placeholder file `tests/EditMode/README.md`: +```markdown +# Edit Mode Tests +Unit tests that run without entering Play Mode. +Use for pure logic: formulas, state machines, data validation. +Assembly definition required: `tests/EditMode/EditModeTests.asmdef` +``` + +Create `tests/PlayMode/README.md`: +```markdown +# Play Mode Tests +Integration tests that run in a real game scene. +Use for cross-system interactions, physics, and coroutines. +Assembly definition required: `tests/PlayMode/PlayModeTests.asmdef` +``` + +Note in the README: **Enabling Unity Test Framework** +``` +Window → General → Test Runner +(Unity Test Framework is included by default in Unity 2019+) +``` + +#### Unreal Engine (`Engine: Unreal` or `Engine: UE5`) + +Create `Source/Tests/README.md`: +```markdown +# Unreal Automation Tests +Tests use the UE Automation Testing Framework. +Run via: Session Frontend → Automation → select "MyGame." 
tests +Or headlessly: UnrealEditor -nullrhi -ExecCmds="Automation RunTests MyGame.; Quit" + +Test class naming: F[SystemName]Test +Test category naming: "MyGame.[System].[Feature]" +``` + +--- + +## Phase 4: Create CI/CD Workflow + +### Godot 4 + +Create `.github/workflows/tests.yml`: + +```yaml +name: Automated Tests + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + name: Run GdUnit4 Tests + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + lfs: true + + - name: Run GdUnit4 Tests + uses: MikeSchulze/gdUnit4-action@v1 + with: + godot-version: '[VERSION FROM docs/engine-reference/godot/VERSION.md]' + paths: | + tests/unit + tests/integration + report-name: test-results + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-results + path: reports/ +``` + +### Unity + +Create `.github/workflows/tests.yml`: + +```yaml +name: Automated Tests + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + name: Run Unity Tests + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + lfs: true + + - name: Run Edit Mode Tests + uses: game-ci/unity-test-runner@v4 + env: + UNITY_LICENSE: ${{ secrets.UNITY_LICENSE }} + with: + testMode: editmode + artifactsPath: test-results/editmode + + - name: Run Play Mode Tests + uses: game-ci/unity-test-runner@v4 + env: + UNITY_LICENSE: ${{ secrets.UNITY_LICENSE }} + with: + testMode: playmode + artifactsPath: test-results/playmode + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-results + path: test-results/ +``` + +Note: Unity CI requires a `UNITY_LICENSE` secret. Add to GitHub repository +secrets before the first CI run. 
+ +### Unreal Engine + +Create `.github/workflows/tests.yml`: + +```yaml +name: Automated Tests + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + test: + name: Run UE Automation Tests + runs-on: self-hosted # UE requires a local runner with the editor installed + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + lfs: true + + - name: Run Automation Tests + run: | + "$UE_EDITOR_PATH" "${{ github.workspace }}/[ProjectName].uproject" \ + -nullrhi -nosound \ + -ExecCmds="Automation RunTests MyGame.; Quit" \ + -log -unattended + shell: bash + + - name: Upload Logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs + path: Saved/Logs/ +``` + +Note: UE CI requires a self-hosted runner with Unreal Editor installed. +Set the `UE_EDITOR_PATH` environment variable on the runner. + +--- + +## Phase 5: Create Smoke Test Seed + +Create `tests/smoke/critical-paths.md`: + +```markdown +# Smoke Test: Critical Paths + +**Purpose**: Run these 10-15 checks in under 15 minutes before any QA hand-off. +**Run via**: `/smoke-check` (which reads this file) +**Update**: Add new entries when new core systems are implemented. + +## Core Stability (always run) + +1. Game launches to main menu without crash +2. New game / session can be started from the main menu +3. Main menu responds to all inputs without freezing + +## Core Mechanic (update per sprint) + + + +4. [Primary mechanic — update when first core system is implemented] + +## Data Integrity + +5. Save game completes without error (once save system is implemented) +6. Load game restores correct state (once load system is implemented) + +## Performance + +7. No visible frame rate drops on target hardware (60fps target) +8. No memory growth over 5 minutes of play (once core loop is implemented) +``` + +--- + +## Phase 6: Post-Setup Summary + +After writing all files, report: + +``` +Test infrastructure created for [engine]. 
+ +Files created: +- tests/README.md +- tests/unit/ (directory) +- tests/integration/ (directory) +- tests/smoke/critical-paths.md +- tests/evidence/ (directory) +[engine-specific files] +- .github/workflows/tests.yml + +Next steps: +1. [Engine-specific install step, e.g., "Install GdUnit4 via AssetLib"] +2. Write your first test: create tests/unit/[first-system]/[system]_test.[ext] +3. Run `/qa-plan sprint` before your first sprint to classify stories and set + test evidence requirements +4. `/smoke-check` before every QA hand-off + +Gate note: /gate-check Technical Setup → Pre-Production now requires: +- tests/ directory with unit/ and integration/ subdirectories +- .github/workflows/tests.yml +- At least one example test file +Run /test-setup and write one example test before advancing. + +Verdict: **COMPLETE** — test framework scaffolded and CI/CD wired up. +``` + +--- + +## Collaborative Protocol + +- **Never overwrite existing test files** — only create files that are missing. + If a test runner file exists, leave it as-is. +- **Always ask before creating files** — Phase 2 requires explicit approval. +- **Engine detection is non-negotiable** — if the engine is not configured, + stop and redirect to `/setup-engine`. Do not guess. +- **`force` flag skips the "already exists" early-exit but never overwrites.** + It means "create any missing files even if the directory already exists." +- For Unity CI, note that the `UNITY_LICENSE` secret must be configured + manually. Do not attempt to automate license management. diff --git a/.omc/skills/ux-design/SKILL.md b/.omc/skills/ux-design/SKILL.md new file mode 100644 index 0000000..31ab8e2 --- /dev/null +++ b/.omc/skills/ux-design/SKILL.md @@ -0,0 +1,975 @@ +--- +name: ux-design +description: "Guided, section-by-section UX spec authoring for a screen, flow, or HUD. Reads game concept, player journey, and relevant GDDs to provide context-aware design guidance. 
Produces ux-spec.md (per screen/flow) or hud-design.md using the studio templates." +argument-hint: "[screen/flow name] or 'hud' or 'patterns'" +user-invocable: true +allowed-tools: Read, Glob, Grep, Write, Edit, AskUserQuestion, Task +agent: ux-designer +--- + +When this skill is invoked: + +## 1. Parse Arguments & Determine Mode + +Three authoring modes exist based on the argument: + +| Argument | Mode | Output file | +|----------|------|-------------| +| `hud` | HUD design | `design/ux/hud.md` | +| `patterns` | Interaction pattern library | `design/ux/interaction-patterns.md` | +| Any other value (e.g., `main-menu`, `inventory`) | UX spec for a screen or flow | `design/ux/[argument].md` | +| No argument | Ask the user | (see below) | + +**If no argument is provided**, do not fail — ask instead. Use `AskUserQuestion`: +- "What are we designing today?" + - Options: "A specific screen or flow (I'll name it)", "The game HUD", "The interaction pattern library", "I'm not sure — help me figure it out" + +If the user selects "I'll name it" or types a screen name, normalize it to kebab-case +for the filename (e.g., "Main Menu" becomes `main-menu`). + +--- + +## 2. Gather Context (Read Phase) + +Read all relevant context **before** asking the user anything. The skill's value +comes from arriving informed. + +### 2a: Required Reads + +- **Game concept**: Read `design/gdd/game-concept.md` — if missing, warn: + > "No game concept found. Run `/brainstorm` first to establish the game's + > foundation before designing UX." + > Continue anyway if the user asks. + +### 2b: Player Journey + +Read `design/player-journey.md` if it exists. For each relevant section, extract: +- Which journey phase(s) does this screen appear in? +- What is the player's emotional state on arrival at this screen? +- What player need is this screen serving in the journey? +- What critical moments (from the journey map) does this screen deliver? 
+ +If the player journey file does not exist, note the gap and proceed: +> "No player journey map found at `design/player-journey.md`. Designing without it +> means we'll be making assumptions about player context. Consider running a player +> journey session after this spec is drafted." + +### 2c: GDD UI Requirements + +Glob `design/gdd/*.md` and grep for `UI Requirements` sections. Read any GDD whose +UI Requirements section references this screen by name or category. + +These GDD UI Requirements are the **requirements input** to this spec. Collect them +as a list of constraints the spec must satisfy. + +If designing the HUD, read ALL GDD UI Requirements sections — the HUD aggregates +requirements from every system. + +### 2d: Existing UX Specs + +Glob `design/ux/*.md` and note which screens already have specs. For screens that +will link to or from the current screen, read their navigation/flow sections to +find the entry and exit points this spec must match. + +### 2e: Interaction Pattern Library + +If `design/ux/interaction-patterns.md` exists, read the pattern catalog index +(the list of pattern names and their one-line descriptions). Do not read full +pattern details — just the catalog. This tells you which patterns already exist +so you can reference them rather than reinvent them. + +### 2f: Art Bible + +Check for `design/art/art-bible.md`. If found, read the visual direction +section. UX layout must align with the aesthetic commitments already made. + +### 2g: Accessibility Requirements + +Check for `design/accessibility-requirements.md`. If found, read it. The spec +must satisfy the accessibility tier committed to there. + +### 2h: Input Method (from Project Config) + +Read `.claude/docs/technical-preferences.md` and extract the `## Input & Platform` +section. 
Store these values for use throughout the skill — they drive the +Interaction Map and inform accessibility requirements: + +- **Input Methods** — e.g., Keyboard/Mouse, Gamepad, Touch, Mixed +- **Primary Input** — the dominant input for this game +- **Gamepad Support** — Full / Partial / None +- **Touch Support** — Full / Partial / None +- **Target Platforms** — for safe zone and aspect ratio decisions + +If the section is unconfigured (`[TO BE CONFIGURED]`), ask once: +> "Input methods aren't configured yet. What does this game target?" +> Options: "Keyboard/Mouse only", "Gamepad only", "Both (PC + Console)", "Touch (mobile)", "All of the above" +> +> (Run `/setup-engine` to save this permanently so you won't be asked again.) + +Store the answer for the rest of this session. Do **not** ask again per section +or per screen. + +### 2i: Present Context Summary + +Before any design work, present a brief summary to the user: + +> **Designing: [Screen/Flow Name]** +> - Mode: [UX Spec / HUD Design / Pattern Library] +> - Journey phase(s): [from player-journey.md, or "unknown — no journey map"] +> - GDD requirements feeding this spec: [count and names, or "none found"] +> - Related screens already specced: [list, or "none yet"] +> - Known patterns available: [count, or "no pattern library yet"] +> - Accessibility tier: [from requirements doc, or "not yet defined"] +> - Input methods: [from technical-preferences.md, or "asked above"] + +Then ask: "Anything else I should read before we start, or shall we proceed?" + +--- + +## 2.5. Retrofit Mode Detection + +Before creating a skeleton, check if the target output file already exists. + +Glob `design/ux/[filename].md` (where `[filename]` is the resolved output path from Phase 1). 
+ +**If the file exists — retrofit mode:** +- Read the file in full +- For each expected section, check whether the body has real content (more than a `[To be designed]` placeholder) or is empty/placeholder +- Present a section status summary to the user: + +> "Found existing UX spec at `design/ux/[filename].md`. Here's what's already done: +> +> | Section | Status | +> |---------|--------| +> | Overview & Context | [Complete / Empty / Placeholder] | +> | Player Journey Integration | ... | +> | Screen Layout & Information Architecture | ... | +> | Interaction Model | ... | +> | Feedback & State Communication | ... | +> | Accessibility | ... | +> | Edge Cases & Error States | ... | +> | Open Questions | ... | +> +> I'll work on the [N] incomplete sections only — existing content will not be overwritten." + +- Skip Section 3 (skeleton creation) — the file already exists +- In Phase 4 (Section Authoring), only work on sections with Status: Empty or Placeholder +- Use `Edit` to fill placeholders in-place rather than creating a new skeleton + +**If the file does not exist — fresh authoring mode:** +Proceed to Phase 3 (Create File Skeleton) as normal. + +--- + +## 3. Create File Skeleton + +Once the user confirms, **immediately** create the output file with empty section +headers. This ensures incremental writes have a target and work survives interruptions. + +Ask: "May I create the skeleton file at `design/ux/[filename].md`?" 
+ +--- + +### Skeleton for UX Spec (screen or flow) + +```markdown +# UX Spec: [Screen/Flow Name] + +> **Status**: In Design +> **Author**: [user + ux-designer] +> **Last Updated**: [today's date] +> **Journey Phase(s)**: [from context] +> **Template**: UX Spec + +--- + +## Purpose & Player Need + +[To be designed] + +--- + +## Player Context on Arrival + +[To be designed] + +--- + +## Navigation Position + +[To be designed] + +--- + +## Entry & Exit Points + +[To be designed] + +--- + +## Layout Specification + +### Information Hierarchy + +[To be designed] + +### Layout Zones + +[To be designed] + +### Component Inventory + +[To be designed] + +### ASCII Wireframe + +[To be designed] + +--- + +## States & Variants + +[To be designed] + +--- + +## Interaction Map + +[To be designed] + +--- + +## Events Fired + +[To be designed] + +--- + +## Transitions & Animations + +[To be designed] + +--- + +## Data Requirements + +[To be designed] + +--- + +## Accessibility + +[To be designed] + +--- + +## Localization Considerations + +[To be designed] + +--- + +## Acceptance Criteria + +[To be designed] + +--- + +## Open Questions + +[To be designed] +``` + +--- + +### Skeleton for HUD Design + +```markdown +# HUD Design + +> **Status**: In Design +> **Author**: [user + ux-designer] +> **Last Updated**: [today's date] +> **Template**: HUD Design + +--- + +## HUD Philosophy + +[To be designed] + +--- + +## Information Architecture + +### Full Information Inventory + +[To be designed] + +### Categorization + +[To be designed] + +--- + +## Layout Zones + +[To be designed] + +--- + +## HUD Elements + +[To be designed] + +--- + +## Dynamic Behaviors + +[To be designed] + +--- + +## Platform & Input Variants + +[To be designed] + +--- + +## Accessibility + +[To be designed] + +--- + +## Open Questions + +[To be designed] +``` + +--- + +### Skeleton for Interaction Pattern Library + +```markdown +# Interaction Pattern Library + +> **Status**: In Design +> **Author**: [user + 
ux-designer] +> **Last Updated**: [today's date] +> **Template**: Interaction Pattern Library + +--- + +## Overview + +[To be designed] + +--- + +## Pattern Catalog + +[To be designed] + +--- + +## Patterns + +[Individual pattern entries added here as they are defined] + +--- + +## Gaps & Patterns Needed + +[To be designed] + +--- + +## Open Questions + +[To be designed] +``` + +--- + +After writing the skeleton, update `production/session-state/active.md` with: +- Task: Designing [screen/flow name] UX spec +- Current section: Starting (skeleton created) +- File: design/ux/[filename].md + +--- + +## 4. Section-by-Section Authoring + +Walk through each section in order. For **each section**, follow this cycle: + +``` +Context -> Questions -> Options -> Decision -> Draft -> Approval -> Write +``` + +1. **Context**: State what this section needs to contain and surface any relevant + constraints from context gathered in Phase 2. +2. **Questions**: Ask what is needed to draft this section. Use `AskUserQuestion` + for constrained choices, conversational text for open-ended exploration. +3. **Options**: Where design choices exist, present 2-4 approaches with pros/cons. + Explain reasoning in conversation, then use `AskUserQuestion` to capture the decision. +4. **Decision**: User picks an approach or provides custom direction. +5. **Draft**: Write the section content in conversation for review. Flag provisional + assumptions explicitly. +6. **Approval**: "Does this capture it? Any changes before I write it to the file?" +7. **Write**: Use `Edit` to replace the `[To be designed]` placeholder with approved + content. Confirm the write. + +After writing each section, update `production/session-state/active.md`. + +--- + +### Section Guidance: UX Spec Mode + +#### Section A: Purpose & Player Need + +This section is the foundation. Every other decision flows from it. + +**Questions to ask**: +- "What player goal does this screen serve? What is the player trying to DO here?" 
+- "What would go wrong if this screen didn't exist or was hard to use?" +- "Complete this sentence: 'The player arrives at this screen wanting to ___.' " + +Cross-reference the player journey context gathered in Phase 2. The stated purpose +must align with the journey phase and emotional state. + +--- + +#### Section B: Player Context on Arrival + +**Questions to ask**: +- "When in the game does a player first encounter this screen?" +- "What were they just doing immediately before reaching this screen?" +- "What emotional state should the design assume? (calm, stressed, curious, time-pressured)" +- "Do players arrive at this screen voluntarily, or are they sent here by the game?" + +Offer to map this against the journey phases if the player journey doc exists. + +--- + +#### Section B2: Navigation Position + +Where does this screen sit in the game's navigation hierarchy? This is a one-paragraph orientation map — not a full flow diagram. + +**Questions to ask**: +- "Is this screen accessed from the main menu, from pause, from within gameplay, or from another screen?" +- "Is it a top-level destination (always reachable) or a context-dependent one (only accessible in certain states)?" +- "Can the player reach this screen from more than one place in the game?" + +Present as: "This screen lives at: [root] → [parent] → [this screen]" plus any alternate entry paths. + +--- + +#### Section B3: Entry & Exit Points + +Map every way the player can arrive at and leave this screen. + +**Questions to ask**: +- "What are all the ways a player can reach this screen?" (List each trigger: button press, game event, redirect from another screen, etc.) +- "What can the player do to exit? What happens when they do?" (Back button, confirm action, timeout, game event) +- "Are there any exits that are one-way — where the player cannot return to this screen without starting over?" 
+ +Present as two tables: + +| Entry Source | Trigger | Player carries this context | +|---|---|---| +| [screen/event] | [how] | [state/data they arrive with] | + +| Exit Destination | Trigger | Notes | +|---|---|---| +| [screen/event] | [how] | [any irreversible state changes] | + +--- + +#### Section C: Layout Specification + +This is the largest and most interactive section. Work through it in sub-sections: + +**Sub-section 1 — Information Hierarchy** (establish this before any layout): +- Ask the user to list every piece of information this screen must communicate. +- Then ask them to rank the items: "What is the single most important thing a player + needs to see first? What is second? What can be discovered rather than immediately visible?" +- Present the resulting hierarchy for approval before moving to zones. + +**Sub-section 2 — Layout Zones**: +- Based on the information hierarchy, propose rough screen zones (header, content + area, action bar, sidebar, etc.). +- Offer 2-3 zone arrangements with rationale for each. Reference platform and + input context gathered from game concept. +- Ask: "Do any of these match your mental image, or shall we build a custom arrangement?" + +**Sub-section 3 — Component Inventory**: +- For each zone, list the UI components it contains. For each component, note: + - Component type (button, list, card, stat display, input field, etc.) + - Content it displays + - Whether it is interactive + - If it uses an existing pattern from the library (reference by pattern name) + - If it introduces a new pattern (flag for later addition to the library) + +**Sub-section 4 — ASCII Wireframe**: +- Offer to generate an ASCII wireframe based on the zone layout and component list. +- Use `AskUserQuestion`: "Want an ASCII wireframe as part of this spec?" + - Options: "Yes, include one", "No, I'll attach a separate file" +- If yes, produce the wireframe in conversation first. Ask for feedback before + writing it to file. 
+ +--- + +#### Section D: States & Variants + +Guide the user to think beyond the happy path. + +**Questions to ask** (work through these one at a time): +- "What does this screen look like the very first time a player sees it, when there + is no data yet? (empty state)" +- "What happens when something goes wrong — an error, a failed action, a missing + resource? (error state)" +- "Is there ever a loading wait on this screen? If so, what does it show? (loading state)" +- "Are there any player progression states that change what this screen shows? For + example, locked content, premium content, or tutorial-mode overlays?" +- "Does this screen behave differently on any supported platform? (platform variant)" + +Present the collected states as a table for approval: + +| State / Variant | Trigger | What Changes | +|-----------------|---------|--------------| +| Default | Normal load | — | +| Empty | No data available | [content area description] | +| [etc.] | [trigger] | [changes] | + +--- + +#### Section E: Interaction Map + +For each interactive component identified in the Layout Specification, define: +- The action (tap, click, press, hold, scroll, drag) +- The platform input(s) that trigger it (mouse click, gamepad A, keyboard Enter) +- The immediate feedback (visual, audio, haptic) +- The outcome (navigation target, state change, data write) + +Use the input methods loaded from `technical-preferences.md` in Phase 2h — do +not ask the user again. State them upfront: "Mapping interactions for: +[Input Methods from tech-prefs]. Covering [Gamepad Support] gamepad support." + +Work through components one at a time rather than asking for all at once. +For navigation actions (going to another screen), verify the target matches +an existing UX spec or note it as a spec dependency. 
+ +--- + +#### Section E2: Events Fired + +For every player action in the Interaction Map, document the corresponding event the game or analytics system should fire — or explicitly note "no event" if none applies. + +**Questions to ask**: +- "For each action, should the game fire an analytics event, trigger a game-state change, or both?" +- "Are there any actions that should NOT fire an event — and is that a deliberate choice?" + +Present as a table alongside the Interaction Map: + +| Player Action | Event Fired | Payload / Data | +|---|---|---| +| [action] | [EventName] or none | [data passed with event] | + +Flag any action that modifies persistent game state (save data, progress, economy) — these need explicit attention from the architecture team. + +--- + +#### Section E3: Transitions & Animations + +Specify how the screen enters and exits, and how it responds to state changes. + +**Questions to ask**: +- "How does this screen appear? (fade in, slide from right, instant pop, scale from button)" +- "How does it dismiss? (fade out, slide back, cut)" +- "Are there any in-screen state transitions that need animation? (loading spinner, success state, error flash)" +- "Is there any animation that could cause motion sickness — and does the game have a reduced-motion option?" + +Minimum required: +- Screen enter transition +- Screen exit transition +- At least one state-change animation if the screen has multiple states + +--- + +#### Section F: Data Requirements + +Cross-reference the GDD UI Requirements sections gathered in Phase 2. + +For each piece of information the screen displays, ask: +- "Where does this data come from? Which system owns it?" +- "Does this screen need to write data back, or is it read-only?" +- "Is any of this data time-sensitive or real-time? (health bars, cooldown timers)" + +Flag any case where the UI would need to own or manage game state as an architectural +concern. 
UX specs define what the UI needs; they do not dictate how the data is +delivered. That is an architecture decision. + +Present the data requirements as a table: + +| Data | Source System | Read / Write | Notes | +|------|--------------|--------------|-------| +| [item] | [system] | Read | — | +| [item] | [system] | Write | [concern if any] | + +--- + +#### Section G: Accessibility + +Cross-reference `design/accessibility-requirements.md` if it exists. + +Walk through the ux-designer agent's standard checklist for this screen: +- Keyboard-only navigation path through all interactive elements +- Gamepad navigation order (if applicable) +- Text contrast and minimum readable font sizes +- Color-independent communication (no information conveyed by color alone) +- Screen reader considerations for any non-text elements +- Any motion or animation that needs a reduced-motion alternative + +Use `AskUserQuestion` to surface any open questions on accessibility tier: +- "Has the accessibility tier been committed to for this project?" + - Options: "Yes, read from requirements doc", "Not yet — let's flag it as a question", "Skip accessibility section for now" + +--- + +#### Section H: Localization Considerations + +Document constraints that affect how this screen behaves when text is translated. + +**Questions to ask**: +- "Which text elements on this screen are the longest? What is the maximum character count that fits the layout?" +- "Are there any elements where text length is layout-critical — e.g., a button label that must stay on one line?" +- "Are there any elements that display numbers, dates, or currencies that need locale-specific formatting?" + +Note: aim to flag any element where a 40% text expansion (common in translations from English to German or French) would break the layout. Mark those as HIGH PRIORITY for the localization engineer. 
+ +--- + +#### Section I: Acceptance Criteria + +Write at least 5 specific, testable criteria that a QA tester can verify without reading any other design document. These become the pass/fail conditions for `/story-done`. + +**Format**: Use checkboxes. Each criterion must be verifiable by a human tester: + +``` +- [ ] Screen opens within [X]ms from [trigger] +- [ ] [Element] displays correctly at [minimum] and [maximum] values +- [ ] [Navigation action] correctly routes to [destination screen] +- [ ] Error state appears when [condition] and shows [specific message or icon] +- [ ] Keyboard/gamepad navigation reaches all interactive elements in logical order +- [ ] [Accessibility requirement] is met — e.g., "all interactive elements have focus indicators" +``` + +**Minimum required**: +- 1 performance criterion (load/open time) +- 1 navigation criterion (at least one entry or exit path verified) +- 1 error/empty state criterion +- 1 accessibility criterion (per committed tier) +- 1 criterion specific to this screen's core purpose + +Ask the user to confirm: "Do these criteria cover what would actually make this screen 'done' for your QA process?" + +--- + +### Section Guidance: HUD Design Mode + +HUD design follows a different order from UX spec mode. Begin with philosophy; +do not touch layout until the information architecture is complete. + +#### Section A: HUD Philosophy + +Ask the user to describe the game's relationship with on-screen information in +1-2 sentences. 
+ +Offer framing examples to help: +- "Nearly HUD-free — atmosphere requires unobstructed immersion (e.g., Hollow Knight, Firewatch)" +- "Minimal but present — only critical information visible, everything else contextual (e.g., Dark Souls)" +- "Information-dense — all decision-relevant data always visible (e.g., Diablo IV, StarCraft II)" +- "Adaptive — HUD density responds to combat state, exploration mode, menus (e.g., God of War)" + +This philosophy becomes the design constraint for every subsequent HUD decision. +If a proposed element conflicts with the stated philosophy, surface that conflict. + +--- + +#### Section B: Information Architecture + +Complete this before any layout work. Do not skip it. + +**Step 1 — Full information inventory**: +Pull all information from GDD UI Requirements sections gathered in Phase 2. +Present the full list: "These are all the things your game systems say they need +to communicate to the player on screen." + +**Step 2 — Categorization**: +For each item, ask the user to categorize it: + +| Category | Description | +|----------|-------------| +| **Must Show** | Always visible, player needs it for core decisions | +| **Contextual** | Visible only when relevant (in combat, near interactable, etc.) | +| **On Demand** | Player must actively request it (toggle, hold button) | +| **Hidden** | Communicated through world/audio, never on-screen text | + +Use `AskUserQuestion` to step through items in groups of 3-4, not all at once. +This is the most consequential design decision in the HUD — do not rush it. + +**Conflict check**: If the information philosophy (Section A) says "nearly HUD-free" +but the Must Show list is growing long, surface the conflict explicitly: +> "The current Must Show list has [N] items. That may conflict with the HUD-free +> philosophy. Options: reduce the Must Show list, revise the philosophy, or define +> a hybrid approach where HUD is absent in exploration and present in combat." 
+ +--- + +#### Section C: Layout Zones + +Only after the information architecture is approved, design layout zones. + +Base layout on: +- Which items are Must Show (they drive the permanent zone decisions) +- Where player attention naturally goes during gameplay (center-screen for action games, + corners for strategy games) +- Platform and aspect ratio targets + +Offer 2-3 zone arrangements. Include rationale based on the HUD philosophy and the +categorization from Section B. + +--- + +#### Section D: HUD Elements + +For each element in the layout, specify: +- Element name and category (Must Show / Contextual / On Demand) +- Content displayed +- Visual form (bar, number, icon, counter, map) +- Update behavior (real-time, event-driven, player-queried) +- Contextual trigger (if not always visible) +- Animation behavior (does it pulse when low? Fade in? Slam in?) + +Work element by element. Reference the interaction pattern library if relevant patterns +exist for status displays, resource bars, or cooldown indicators. + +--- + +#### Sections E, F, G: Dynamic Behaviors, Platform Variants, Accessibility + +These follow the same structure as the UX spec equivalents. See UX Spec section +guidance for D (States/Variants), E (Interactions), and G (Accessibility). + +For the HUD specifically, emphasize: +- Dynamic Behaviors: what causes the HUD to change density mid-gameplay? +- Platform Variants: does mobile/console require different element sizes or positions? + +--- + +### Section Guidance: Interaction Pattern Library Mode + +Pattern library authoring is additive and catalog-driven, not linear. + +#### Phase 1: Catalog Existing Patterns + +Glob `design/ux/*.md` (excluding `interaction-patterns.md`) and read the Component +Inventory and Interaction Map sections of each spec. Extract every interaction +pattern used. 
+ +Present the extracted list: "Based on existing UX specs, these patterns are already +in use in the game:" +- [Pattern name]: used in [screen], [screen] +- [etc.] + +Ask: "Are there patterns you know exist but aren't in existing specs yet? List any +additional ones now." + +--- + +#### Phase 2: Formalize Each Pattern + +For each pattern (existing or new), document: + +```markdown +### [Pattern Name] + +**Category**: Navigation / Input / Feedback / Data Display / Modal / Overlay / [other] +**Used In**: [list of screens] + +**Description**: [One paragraph explaining what this pattern is and when to use it] + +**Specification**: +- [Component behavior] +- [Input mapping] +- [Visual/audio feedback] +- [Accessibility requirements for this pattern] + +**When to Use**: [Conditions where this pattern is appropriate] +**When NOT to Use**: [Conditions where another pattern is more appropriate] + +**Reference**: [Screenshot path or ASCII example, if available] +``` + +Work through patterns in groups. Offer: "Shall I draft the first batch based on what +I've found in the existing specs, or do you want to define them one by one?" + +--- + +#### Phase 3: Identify Gaps + +After cataloging known patterns, ask: +- "Are there screens or interactions planned that would need patterns not yet + in this library?" +- "Are there any patterns in existing specs that feel inconsistent with each + other and should be consolidated?" + +Document gaps in the Gaps section for follow-up. + +--- + +## 5. Cross-Reference Check + +Before marking the spec as ready for review, run these checks: + +**1. GDD requirement coverage**: Does every GDD UI Requirement that references +this screen have a corresponding element in this spec? Present any gaps. + +**2. Pattern library alignment**: Are all interaction patterns used in this spec +referenced by name? 
If a new pattern was invented during this spec session, flag +it for addition to the pattern library: +> "This spec uses [pattern name], which isn't in the pattern library yet. +> Want to add it now, or flag it as a gap?" + +**3. Navigation consistency**: Do the entry/exit points in this spec match the +navigation map in any related specs? Flag mismatches. + +**4. Accessibility coverage**: Does the spec address the accessibility tier +committed to in `design/accessibility-requirements.md`? If not, flag open questions. + +**5. Empty states**: Does every data-dependent element have an empty state defined? +Flag any that don't. + +Present the check results: +> **Cross-Reference Check: [Screen Name]** +> - GDD requirements: [N of M covered / all covered] +> - New patterns to add to library: [list or "none"] +> - Navigation mismatches: [list or "none"] +> - Accessibility gaps: [list or "none"] +> - Missing empty states: [list or "none"] + +--- + +## 6. Handoff + +When all sections are approved and written: + +### 6a: Update Session State + +Update `production/session-state/active.md` with: +- Task: [screen-name] UX spec +- Status: Complete (or In Review) +- File: design/ux/[filename].md +- Sections: All written +- Next: [suggestion] + +### 6b: Suggest Next Step + +Before presenting options, state clearly: + +> "This spec should be validated with `/ux-review` before it enters the +> implementation pipeline. The Pre-Production gate requires all key screen specs +> to have a review verdict." + +Then use `AskUserQuestion`: +- "Run `/ux-review [filename]` now, or do something else first?" 
+ - Options: + - "Run `/ux-review` now — validate this spec" + - "Design another screen first, then review all specs together" + - "Update the interaction pattern library with new patterns from this spec" + - "Stop here for this session" + +If the user picks "Design another screen first", add a note: "Reminder: run +`/ux-review` on all completed specs before running `/gate-check pre-production`." + +### 6c: Cross-Link Related Specs + +If other UX specs link to or from this screen, note which ones should reference +this spec. Do not edit those files without asking — just name them. + +--- + +## 7. Recovery & Resume + +If the session is interrupted (compaction, crash, new session): + +1. Read `production/session-state/active.md` — it records the current screen + and which sections are complete. +2. Read `design/ux/[filename].md` — sections with real content are done; + sections with `[To be designed]` still need work. +3. Resume from the next incomplete section — no need to re-discuss completed ones. + +This is why incremental writing matters: every approved section survives any +disruption. + +--- + +## 8. Specialist Agent Routing + +This skill uses `ux-designer` as the primary agent (set in frontmatter). 
For +specific sub-topics, additional context or coordination may be needed: + +| Topic | Coordinate with | +|-------|----------------| +| Visual aesthetics, color, layout feel | `art-director` — UX spec defines zones; art defines how they look | +| Implementation feasibility (engine constraints) | `ui-programmer` — before finalizing component inventory | +| Gameplay data requirements | `game-designer` — when data ownership is unclear | +| Narrative/lore visible in the UI | `narrative-director` — for flavor text, item names, lore panels | +| Accessibility tier decisions | Handled by this session — owned by ux-designer | + +When delegating to another agent via the Task tool: +- Provide: screen name, game concept summary, the specific question needing expert input +- The agent returns analysis to this session +- This session presents the agent's output to the user +- The user decides; this session writes to file +- Agents do NOT write to files directly — this session owns all file writes + +--- + +## Collaborative Protocol + +This skill follows the collaborative design principle at every step: + +1. **Question -> Options -> Decision -> Draft -> Approval** for every section +2. **AskUserQuestion** at every decision point (Explain -> Capture pattern): + - Phase 2: "Ready to start, or need more context?" + - Phase 3: "May I create the skeleton?" + - Phase 4 (each section): design questions, approach options, draft approval + - Phase 5: "Run cross-reference check? What's next?" +3. **"May I write to [filepath]?"** before the skeleton and before each section write +4. **Incremental writing**: Each section is written to file immediately after approval +5. **Session state updates**: After every section write + +**Aesthetic deference**: When layout or visual choices come down to personal taste, +present the options and ask. Do not select a layout because it is "standard" — always +confirm. The user is the creative director. 
+ +**Conflict surfacing**: When a GDD requirement and the available screen real estate +conflict, surface the conflict and present resolution options. Never silently drop +a requirement. Never silently expand the layout without flagging it. + +**Never** auto-generate the full spec and present it as a fait accompli. +**Never** write a section without user approval. +**Never** contradict an existing approved UX spec without flagging the conflict. +**Always** show where decisions come from (GDD requirements, player journey, user choices). + +Verdict: **COMPLETE** — UX spec written and approved section by section. + +--- + +## Recommended Next Steps + +- Run `/ux-review [filename]` to validate this spec before it enters the implementation pipeline +- Run `/ux-design [next-screen]` to continue designing remaining screens or flows +- Run `/gate-check pre-production` once all key screens have approved UX specs diff --git a/.omc/skills/ux-review/SKILL.md b/.omc/skills/ux-review/SKILL.md new file mode 100644 index 0000000..609bf69 --- /dev/null +++ b/.omc/skills/ux-review/SKILL.md @@ -0,0 +1,262 @@ +--- +name: ux-review +description: "Validates a UX spec, HUD design, or interaction pattern library for completeness, accessibility compliance, GDD alignment, and implementation readiness. Produces APPROVED / NEEDS REVISION / MAJOR REVISION NEEDED verdict with specific gaps." +argument-hint: "[file-path or 'all' or 'hud' or 'patterns']" +user-invocable: true +allowed-tools: Read, Glob, Grep +agent: ux-designer +--- + +## Overview + +Validates UX design documents before they enter the implementation pipeline. +Acts as the quality gate between UX Design and Visual Design/Implementation in +the `/team-ui` pipeline. 
+ +**Run this skill:** +- After completing a UX spec with `/ux-design` +- Before handing off to `ui-programmer` or `art-director` +- Before the Pre-Production to Production gate check (which requires key screens + to have reviewed UX specs) +- After major revisions to a UX spec + +**Verdict levels:** +- **APPROVED** — spec is complete, consistent, and implementation-ready +- **NEEDS REVISION** — specific gaps found; fix before handoff but not a full redesign +- **MAJOR REVISION NEEDED** — fundamental issues with scope, player need, or + completeness; needs significant rework + +--- + +## Phase 1: Parse Arguments + +- **Specific file path** (e.g., `/ux-review design/ux/inventory.md`): validate + that one document +- **`all`**: find all files in `design/ux/` and validate each +- **`hud`**: validate `design/ux/hud.md` specifically +- **`patterns`**: validate `design/ux/interaction-patterns.md` specifically +- **No argument**: ask the user which spec to validate + +For `all`, output a summary table first (file | verdict | primary issue) then +full detail for each. + +--- + +## Phase 2: Load Cross-Reference Context + +Before validating any spec, load: + +1. **Input & Platform config**: Read `.claude/docs/technical-preferences.md` and + extract `## Input & Platform`. This is the authoritative source for which input + methods the game supports — use it to drive the Input Method Coverage checks in + Phase 3A, not the spec's own header. If unconfigured, fall back to the spec header. +2. The accessibility tier committed to in `design/accessibility-requirements.md` + (if it exists) +3. The interaction pattern library at `design/ux/interaction-patterns.md` (if + it exists) +4. The GDDs referenced in the spec's header (read their UI Requirements sections) +5. The player journey map at `design/player-journey.md` (if it exists) for + context-arrival validation + +--- + +## Phase 3A: UX Spec Validation Checklist + +Run all checks against a `ux-spec.md`-based document. 
+ +### Completeness (required sections) + +- [ ] Document header present with Status, Author, Platform Target +- [ ] Purpose & Player Need — has a player-perspective need statement (not + developer-perspective) +- [ ] Player Context on Arrival — describes player's state and prior activity +- [ ] Navigation Position — shows where screen sits in hierarchy +- [ ] Entry & Exit Points — all entry sources and exit destinations documented +- [ ] Layout Specification — zones defined, component inventory table present +- [ ] States & Variants — at minimum: loading, empty/populated, and error states + documented +- [ ] Interaction Map — covers all target input methods (check platform target + in header) +- [ ] Data Requirements — every displayed data element has a source system and owner +- [ ] Events Fired — every player action has a corresponding event or null + explanation +- [ ] Transitions & Animations — at least enter/exit transitions specified +- [ ] Accessibility Requirements — screen-level requirements present +- [ ] Localization Considerations — max character counts for text elements +- [ ] Acceptance Criteria — at least 5 specific testable criteria + +### Quality Checks + +**Player Need Clarity** +- [ ] Purpose is written from player perspective, not system/developer perspective +- [ ] Player goal on arrival is unambiguous ("The player arrives wanting to ___") +- [ ] The player context on arrival is specific (not just "they opened the + inventory") + +**Completeness of States** +- [ ] Error state is documented (not just happy path) +- [ ] Empty state is documented (no data scenario) +- [ ] Loading state is documented if the screen fetches async data +- [ ] Any state with a timer or auto-dismiss is documented with duration + +**Input Method Coverage** +- [ ] If platform includes PC: keyboard-only navigation is fully specified +- [ ] If platform includes console/gamepad: d-pad navigation and face button + mapping documented +- [ ] No interaction requires mouse-like 
precision on gamepad +- [ ] Focus order is defined (Tab order for keyboard, d-pad order for gamepad) + +**Data Architecture** +- [ ] No data element has "UI" listed as the owner (UI must not own game state) +- [ ] Update frequency is specified for all real-time data (not just "realtime" — + what triggers update?) +- [ ] Null handling is specified for all data elements (what shows when data is + unavailable?) + +**Accessibility** +- [ ] Accessibility tier from `accessibility-requirements.md` is matched or exceeded +- [ ] If Basic tier: no color-only information indicators +- [ ] If Standard tier+: focus order documented, text contrast ratios specified +- [ ] If Comprehensive tier+: screen reader announcements for key state changes +- [ ] Colorblind check: any color-coded elements have non-color alternatives + +**GDD Alignment** +- [ ] Every GDD UI Requirement referenced in the header is addressed in this spec +- [ ] No UI element displays or modifies game state without a corresponding GDD + requirement +- [ ] No GDD UI Requirement is missing from this spec (cross-check the referenced + GDD sections) + +**Pattern Library Consistency** +- [ ] All interactive components reference the pattern library (or note they are + new patterns) +- [ ] No pattern behavior is re-specified from scratch if it already exists in + the pattern library +- [ ] Any new patterns invented in this spec are flagged for addition to the + pattern library + +**Localization** +- [ ] Character limit warnings present for all text-heavy elements +- [ ] Any layout-critical text has been flagged for 40% expansion accommodation + +**Acceptance Criteria Quality** +- [ ] Criteria are specific enough for a QA tester who hasn't seen the design docs +- [ ] Performance criterion present (screen opens within Xms) +- [ ] Resolution criterion present +- [ ] No criterion requires reading another document to evaluate + +--- + +## Phase 3B: HUD Validation Checklist + +Run all checks against a `hud-design.md`-based 
document. + +### Completeness + +- [ ] HUD Philosophy defined +- [ ] Information Architecture table covers ALL systems with UI Requirements in GDDs +- [ ] Layout Zones defined with safe zone margins for all target platforms +- [ ] Every HUD element has a full specification (zone, visibility trigger, data + source, priority) +- [ ] HUD States by Gameplay Context covers at minimum: exploration, combat, + dialogue/cutscene, paused +- [ ] Visual Budget defined (max simultaneous elements, max screen %) +- [ ] Platform Adaptation covers all target platforms +- [ ] Tuning Knobs present for player-adjustable elements + +### Quality Checks + +- [ ] No HUD element covers the center play area without a visibility rule to + hide it +- [ ] Every information item that exists in any GDD is either in the HUD or + explicitly categorized as "hidden/demand" +- [ ] All color-coded HUD elements have colorblind variants +- [ ] HUD elements in the Feedback & Notification section have queue/priority + behavior defined +- [ ] Visual Budget compliance: total simultaneous elements is within budget + +### GDD Alignment + +- [ ] All systems in `design/gdd/systems-index.md` with UI category have + representation in HUD (or justified absence) + +--- + +## Phase 3C: Pattern Library Validation Checklist + +- [ ] Pattern catalog index is current (matches actual patterns in document) +- [ ] All standard control patterns are specified: button variants, toggle, + slider, dropdown, list, grid, modal, dialog, toast, tooltip, progress bar, + input field, tab bar, scroll +- [ ] All game-specific patterns needed by current UX specs are present +- [ ] Each pattern has: When to Use, When NOT to Use, full state specification, + accessibility spec, implementation notes +- [ ] Animation Standards table present +- [ ] Sound Standards table present +- [ ] No conflicting behaviors between patterns (e.g., "Back" behavior consistent + across all navigation patterns) + +--- + +## Phase 4: Output the Verdict + 
+```markdown +## UX Review: [Document Name] +**Date**: [date] +**Reviewer**: ux-review skill +**Document**: [file path] +**Platform Target**: [from header] +**Accessibility Tier**: [from header or accessibility-requirements.md] + +### Completeness: [X/Y sections present] +- [x] Purpose & Player Need +- [ ] States & Variants — MISSING: error state not documented + +### Quality Issues: [N found] +1. **[Issue title]** [BLOCKING / ADVISORY] + - What's wrong: [specific description] + - Where: [section name] + - Fix: [specific action to take] + +### GDD Alignment: [ALIGNED / GAPS FOUND] +- GDD [name] UI Requirements — [X/Y requirements covered] +- Missing: [list any uncovered GDD requirements] + +### Accessibility: [COMPLIANT / GAPS / NON-COMPLIANT] +- Target tier: [tier] +- [list specific accessibility findings] + +### Pattern Library: [CONSISTENT / INCONSISTENCIES FOUND] +- [findings] + +### Verdict: APPROVED / NEEDS REVISION / MAJOR REVISION NEEDED +**Blocking issues**: [N] — must be resolved before implementation +**Advisory issues**: [N] — recommended but not blocking + +[For APPROVED]: This spec is ready for handoff to `/team-ui` Phase 2 +(Visual Design). + +[For NEEDS REVISION]: Address the [N] blocking issues above, then re-run +`/ux-review`. + +[For MAJOR REVISION NEEDED]: The spec has fundamental gaps in [areas]. +Recommend returning to `/ux-design` to rework [sections]. +``` + +--- + +## Phase 5: Collaborative Protocol + +This skill is READ-ONLY — it never edits or writes files. It reports findings only. + +After delivering the verdict: +- For **APPROVED**: suggest running `/team-ui` to begin implementation coordination +- For **NEEDS REVISION**: offer to help fix specific gaps ("Would you like me to + help draft the missing error state?") — but do not auto-fix; wait for user + instruction +- For **MAJOR REVISION NEEDED**: suggest returning to `/ux-design` with the + specific sections to rework + +Never block the user from proceeding — the verdict is advisory. 
Document risks, +present findings, let the user decide whether to proceed despite concerns. A user +who chooses to proceed with a NEEDS REVISION spec takes on the documented risk. diff --git a/.omc/state/hud-stdin-cache.json b/.omc/state/hud-stdin-cache.json new file mode 100644 index 0000000..0cea710 --- /dev/null +++ b/.omc/state/hud-stdin-cache.json @@ -0,0 +1 @@ +{"session_id":"7ba87a7d-413e-40a1-84b0-fe18f7a31f33","transcript_path":"C:\\Users\\September\\.claude\\projects\\D--Learn-GameLearn-UnityProjects-GeometryTD\\7ba87a7d-413e-40a1-84b0-fe18f7a31f33.jsonl","cwd":"D:\\Learn\\GameLearn\\UnityProjects\\GeometryTD","model":{"id":"claude-sonnet-4-6","display_name":"Sonnet 4.6"},"workspace":{"current_dir":"D:\\Learn\\GameLearn\\UnityProjects\\GeometryTD","project_dir":"D:\\Learn\\GameLearn\\UnityProjects\\GeometryTD","added_dirs":[]},"version":"2.1.114","output_style":{"name":"default"},"cost":{"total_cost_usd":0,"total_duration_ms":2177,"total_api_duration_ms":0,"total_lines_added":0,"total_lines_removed":0},"context_window":{"total_input_tokens":0,"total_output_tokens":0,"context_window_size":200000,"current_usage":null,"used_percentage":null,"remaining_percentage":null},"exceeds_200k_tokens":false} \ No newline at end of file diff --git a/Assets/Launcher.unity b/Assets/Launcher.unity index da0e66f..6f6e8c5 100644 --- a/Assets/Launcher.unity +++ b/Assets/Launcher.unity @@ -729,6 +729,10 @@ PrefabInstance: propertyPath: m_FrameRate value: 60 objectReference: {fileID: 0} + - target: {fileID: 11499388, guid: adb3eb1c35fcff14f89fba7b05c9d71c, type: 3} + propertyPath: m_GameSpeed + value: 2 + objectReference: {fileID: 0} - target: {fileID: 11499388, guid: adb3eb1c35fcff14f89fba7b05c9d71c, type: 3} propertyPath: m_EditorLanguage value: 8 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..d5fb635 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,180 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in 
this repository. + +## 项目概述 + +几何塔防 (Geometry TD) - 塔防肉鸽游戏。使用 Unity 引擎,基于 GameFramework 游戏框架构建。 + +## 构建与开发 + +- **打开项目**: 使用 Unity Hub 加载 `GeometryTD` 目录 +- **运行**: Unity Editor 中点击 Play 按钮(无 CLI 方式) +- **构建**: `File > Build Settings` 选择目标平台 +- **IDE**: 打开 `GeometryTD.sln` 或 `Assembly-CSharp.csproj` 进行 C# 开发 + +## 启动流程 + +游戏启动路径:`Assets/Launcher.unity` → `ProcedureMenu` → `Menu.unity` → `Main.unity`。流程切换通过 `ChangeState` 驱动。 + +## 架构概览 + +### 目录结构 +``` +Assets/ +├── GameFramework/ # 游戏框架(来自 gameframework.cn) +├── GameMain/ +│ ├── Scripts/ # 游戏逻辑代码 +│ │ ├── Base/ # GameEntry 等基础组件 +│ │ ├── Components/ # 组件(InputComponent, TowerController 等) +│ │ ├── CustomComponent/# 自定义组件(UIRouterComponent 等) +│ │ ├── DataTable/ # 数据表(DR* 前缀,如 DREnemy, DRTag) +│ │ ├── Definition/ # 枚举、Struct 定义 +│ │ ├── Entity/ # 实体系统(EntityBase, EntityData, EntityLogic) +│ │ ├── Event/ # 事件定义 +│ │ ├── Factory/ # 工厂类 +│ │ ├── Procedure/ # 流程状态机(FSM) +│ │ ├── Scene/ # 场景相关 +│ │ ├── UI/ # UI 系统 +│ │ └── Utility/ # 工具类 +│ ├── Scenes/ # Unity 场景(Menu.unity, Main.unity) +│ ├── DataTables/ # 数据表资源 +│ └── Configs/ # 配置文件 +└── Tests/ # 编辑器模式测试 +``` + +### UI 架构 (MV* + UseCase 模式) + +UI 采用 Context/Controller/UseCase/View 分层: + +``` +UIFormController (接口) + └── UIFormControllerBase (泛型基类) + └── SpecificController (如 ShopFormController) + +IUIUseCase (用例接口) + └── SpecificUseCase (如 ShopFormUseCase) + +UIContext (UI 状态) + └── SpecificContext (如 ShopFormContext) + +View (MonoBehaviour) + └── SpecificView (如 ShopForm) +``` + +- **Controller**: 处理 UI 交互逻辑,实现 `IUIFormController` +- **UseCase**: 封装业务逻辑,实现 `IUIUseCase` +- **Context**: UI 状态数据,传递给 Controller +- **View**: Unity MonoBehaviour,处理 UI 表现 + +### 实体系统 (Entity System) + +基于 GameFramework 的实体系统: + +``` +EntityLogic (GameFramework) + └── EntityBase + ├── Player + ├── EnemyEntity + ├── TowerEntity + └── BulletEntity + +EntityData (数据对象) + └── EntityDataBase + ├── PlayerData + ├── EnemyData + └── TowerData +``` + +### 流程系统 (Procedure/FSM) + +游戏流程使用状态机管理: + +``` 
+ProcedureBase + ├── ProcedureMenu # 菜单流程 + ├── ProcedureChangeScene # 场景切换 + └── ProcedureMain # 主游戏流程(可能存在) +``` + +状态转换通过 `ChangeState` 触发,数据传递使用 `procedureOwner.SetData`。 + +### 组件系统 (Components) + +塔防组件系统用于组装防御塔: + +``` +BasicBaseComp # 底座组件 +BasicBearingComp # 轴承组件 +BasicMuzzleComp # 枪口组件 +ShooterMuzzleComp +ShooterBullet +MovementComponent +InputComponent +``` + +### 数据表 (DataTable) + +数据驱动的设计,数据表类以 `DR` 前缀: + +``` +DREnemy, DRTag, DRLevel, DRScene, DRShopPrice, +DROutGameDropPool, DRTagConfig, DRRarityTagBudget +``` + +数据表资源在 `Assets/GameMain/DataTables/`,代码在 `Assets/GameMain/Scripts/DataTable/`。开发时编辑 `数据表/` 目录下的数据表,导出后同步到 `Assets/GameMain/DataTables/`。 + +### 标签系统 (Tag System) + +组件产出(Tag 生成/掉落/奖励候选)由 `InventoryGenerationComponent` 统一运行时入口,编排 `DropPoolRoller`、`RewardCandidateBuilder`、`OutGameDropRuleService`。详见 `docs/TagSystemDesign.md` 和 `docs/TagSystemRoadmap.md`。 + +### 商店与 RepoForm + +`ShopNode` 只承载玩家购买组件的逻辑;`RepoForm` 负责出售功能(commit `2e54acb`)。两者职责分离,不得交叉。 + +## 测试 + +测试位于 `Assets/Tests/EditMode/` 目录下,使用 Unity Test Framework 运行。在 Unity Editor 中通过 `Window > General > Test Runner` 执行。运行单个测试:选中目标测试,点击 `Run Selected`。 + +## 编码规范 + +- **缩进**: 4 空格 + Allman 大括号风格 +- **命名**: 类型/方法/公开成员 `PascalCase`,局部变量/参数 `camelCase` +- **命名空间**: `GeometryTD.*` 按功能区域划分 +- **断言优先**: 使用 `Debug.Assert` 而非静默忽略错误 + +详见 `AGENTS.md` 中的完整编码原则。 + +## 关键设计文档 + +- `docs/CombatNodeArchitecture.md` - CombatNode 战斗系统架构规范(含命名后缀词典) +- `docs/TagSystemDesign.md` - 标签系统设计 +- `docs/MapEntityArchitecture.md` - 地图实体架构 +- `design/gdd/systems-index.md` - 核心系统索引与优先级 + +## 命名后缀规范 + +命名后缀具有严格语义约束: + +| 后缀 | 用途 | 示例 | +|------|------|------| +| `Scheduler` | 状态机边界/阶段推进总控 | `CombatScheduler` | +| `Manager` | 子域 Facade/聚合入口 | `EnemyManager` | +| `Coordinator` | 跨状态/跨服务的流程编排 | `CombatSchedulerCoordinator` | +| `Service` | 聚焦业务行为 | `OutGameDropRuleService` | +| `Calculator` | 纯计算与结果组装 | `CombatSettlementCalculator` | +| `Session` | 一次生命周期对象 | `CombatLoadSession` | +| `Bridge` | 框架边界适配器 | 
`CombatEventBridge` | +| `Runtime` | 运行时可变状态承载 | `PhaseLoopRuntime` | +| `Context` | 被动数据包/共享上下文 | `CombatSettlementContext` | +| `Result` | 动作输出/结算产出 | `DropResult` | +| `Flags` | 布尔控制项聚合 | `CombatFlowFlags` | +| `Resolver` | 映射/查找/判定/解析 | `EnemySpawnPathResolver` | +| `Tracker` | 跟踪运行中实体或事实真值 | `EnemyLifecycleTracker` | +| `Port` | 受限宿主接口 | `ICombatNodePort` | + +## 数据工作流 + +- 开发时编辑 `数据表/` 目录下的数据表 +- 导出后同步到 `Assets/GameMain/DataTables/` +- 数据表代码在 `Assets/GameMain/Scripts/DataTable/`(`DR` 前缀) diff --git a/design/gdd/event-system.md b/design/gdd/event-system.md new file mode 100644 index 0000000..ceb393b --- /dev/null +++ b/design/gdd/event-system.md @@ -0,0 +1,338 @@ +# Event System + +> **Status**: Designed +> **Author**: SepComet +> **Last Updated**: 2026-04-29 +> **Implements Pillar**: [To be designed] + +## Overview + +The Event System is a **deterministic narrative decision service** that presents the player with branching choices at Event nodes during a run. It reads event definitions from `DREvent` data table, selects the active event via `EventNodeComponent.SelectActiveEvent()` using a seed derived from `RunNodeExecutionContext`, and executes the player's chosen option through `EventOptionExecutor`. Each event offers one or more options, each containing: requirements (e.g., gold threshold, component count), cost effects applied before a probability roll, and reward effects applied only if the roll succeeds. All randomness uses seeded `System.Random` derived from `runSeed + sequenceIndex + nodeId + eventId + optionIndex + effectIndex + salt`, guaranteeing run reproducibility. Events are data-driven via JSON option payloads in `DREvent.OptionsRaw`, enabling new events to be authored without code changes. + +## Player Fantasy + +**"Every choice carves a different path."** + +The Event System delivers the fantasy of **narrative surprise and meaningful stakes**. 
The player enters an Event node knowing only that something unexpected awaits — they may gain a windfall, suffer a setback, or face a gamble where the odds are unclear. Events break the mechanical rhythm of combat and shop, adding the texture of a story that unfolds differently every run. Each option carries a clear cost and an uncertain reward — the player must read the situation, weigh their resources, and commit. + +The player should feel: +- **Curiosity and tension** — what will this event be? Events are the primary source of narrative variety between combat nodes +- **Genuine dilemma** — options feel meaningfully different, not obviously right or wrong +- **Risk awareness** — probabilistic options feel like a gamble, not a guaranteed upgrade +- **Story authorship** — the accumulated events of a run become a story the player tells about what happened to them + +**Reference**: Slay the Spire's event philosophy — events are never pure upside, rewards require trade-offs, and the best path through a run is never obvious. + +## Detailed Design + +### Core Rules + +**ER1 — Event Selection (Deterministic)** +When the player enters an Event node, `EventNodeComponent.SelectActiveEvent()` picks one event from the `DREvent` data table using a seed derived from `RunNodeExecutionContext`: `seed = (((runSeed * 31) + sequenceIndex) * 31) + nodeId` (see F2). The same run seed, node ID, and sequence index always produce the same event — run reproducibility is guaranteed. If no context is available (null), a random Unity `Random.Range` selection is used (non-reproducible, only for dev/fallback). + +**ER2 — Option Availability Evaluation** +Before displaying options, `EventOptionExecutor.EvaluateOption()` checks every option against the player's `BackpackInventoryData` snapshot.
Three requirement types exist: +- `GoldAtLeast(count)`: player gold ≥ count +- `CompCountAtLeast(count, rarity)`: at least count loose (unassembled) components of specified rarity +- `TowerCountAtLeast(count)`: at least count assembled towers + +Options with unmet requirements are shown as **Blocked** with a reason string (e.g., "需要至少 100 金币"). Blocked options cannot be selected but remain visible. + +**ER3 — Option Execution Flow** +When the player selects an option, `EventOptionExecutor.Execute()` runs in three steps: +1. **Cost Effects applied immediately** — all effects in `costEffects[]` execute against a working inventory copy (gold deducted, components removed, tower endurance reduced) +2. **Probability Roll** — `RollProbability()` uses seeded `System.Random`: `seed = runSeed + sequenceIndex + eventId + optionIndex + 0 + salt(17)`. If `random.NextDouble() ≤ probability`, roll succeeds +3. **Reward Effects applied on success only** — effects in `rewardEffects[]` execute if the roll succeeded, otherwise nothing is added + +**ER4 — Effect Types** +Four effect types exist: +- `AddGold(count)`: adjusts working gold by `count` (positive = gain, negative = cost). Throws if gold would go negative +- `RemoveRandomComps(count, rarity)`: removes `count` random loose components of specified rarity from inventory, using a seeded shuffle +- `AddRandomComps(count, minRarity, maxRarity)`: calls `InventoryGenerationComponent.BuildEventRewardComponents()` to generate `count` components in the rarity range, seeded by context +- `DamageRandomTowersEndurance(count, amount)`: reduces endurance of `count` random assembled towers by `amount`, using `InventoryTowerEnduranceUtility.ReduceTowerEndurance()` + +**ER5 — Inventory Working Copy & Commit** +All evaluation and execution uses a `BackpackInventoryData` snapshot (`GameEntry.PlayerInventory.GetInventorySnapshot()`). 
On `EventOptionExecutionResult.Accepted`, `GameEntry.PlayerInventory.ReplaceInventorySnapshot(workingInventory)` commits changes to the real inventory. The event form closes and `NodeCompleteEventArgs` fires. + +**ER6 — Determinism Guarantees** +Every random operation — event selection, component shuffle, probability roll, component generation — uses `System.Random` seeded from the run context. The seed chain is: +- Event selection: `(((runSeed * 31) + sequenceIndex) * 31) + nodeId` (see F2) +- Probability roll: `runSeed + sequenceIndex + eventId + optionIndex + 0 + salt(17)` +- Effect random: `runSeed + sequenceIndex + eventId + optionIndex + effectIndex + salt` + +### States and Transitions + +The Event System operates across two layers: the **node component** (managing lifecycle) and the **UI form** (managing player interaction). + +| State | Owner | Description | +|-------|-------|-------------| +| **Idle** | EventNodeComponent | No event active. Component initialized, data table loaded. | +| **EventActive** | EventNodeComponent | Event node is running. Context is set, event is selected, form is open. | +| **FormDisplayed** | EventFormUseCase | Event form is open, options are evaluated and shown. Player is browsing. | +| **Executing** | EventFormUseCase | Player has selected an option. Cost effects applied, probability rolled, rewards applied or skipped. | +| **FormClosed** | EventFormUseCase | Option execution complete. Form is closed.
| + +**Transitions:** + +| From | To | Trigger | +|------|----|---------| +| Idle | EventActive | Node System triggers `StartEvent(RunNodeExecutionContext)` | +| EventActive | FormDisplayed | `EventFormUseCase.BindEvent()` + `OpenUI(EventForm)` completes | +| FormDisplayed | Executing | Player clicks a selectable option; `TrySelectOption(optionIndex)` is called | +| Executing | FormClosed | Execution result returned; `EndEvent()` called, `CloseUI(EventForm)` | +| FormClosed | Idle | `ClearActiveNodeContext()` resets component state | + +There are **no player-accessible states** — the player only ever sees FormDisplayed (browsing options). The Executing state is transient — cost effects, roll, and reward effects execute synchronously before the form closes. + +### Interactions with Other Systems + +**Upstream — Node System** +- **Receives**: `StartEvent(RunNodeExecutionContext)` trigger from Node System when player navigates to an Event node +- **Provides**: Fires `NodeCompleteEventArgs` on event end, with inventory snapshot +- **Interface owner**: Node System + +**Upstream — PlayerInventoryComponent** +- **Receives**: `GetInventorySnapshot()` to get current gold, components, towers before evaluating options +- **Receives**: `ReplaceInventorySnapshot(workingInventory)` to commit changes after option execution +- **Provides**: Working inventory state for requirement checks and effect application +- **Interface owner**: Event System (consumer) + +**Upstream — InventoryGenerationComponent** +- **Receives**: `BuildEventRewardComponents(count, minRarity, maxRarity, runSeed, sequenceIndex, eventId, optionIndex, effectIndex)` for `AddRandomComps` effect type +- **Provides**: Component generation service for event rewards +- **Interface owner**: Event System (consumer) + +**Downstream — UI (EventForm)** +- **Receives**: `EventFormRawData` (event title, description, option items with availability status) via `CreateInitialModel()` +- **Receives**: 
`TrySelectOption(optionIndex)` call when player clicks an option +- **Provides**: Opens/closes `UIFormType.EventForm` +- **Interface owner**: Event System (provider) + +## Formulas + +**F1 — Probability Roll** +``` +success = (random.NextDouble() <= probability) +where random is seeded with: seed = runSeed + sequenceIndex + eventId + optionIndex + 0 + 17 +``` + +**F2 — Event Selection Seed** +``` +seed = (((runSeed * 31) + sequenceIndex) * 31) + nodeId +``` +Used by `EventNodeComponent.BuildSelectionSeed()`. `*31` is a standard hash-combining technique. + +**F3 — Effect Random Seed** +``` +seed = runSeed + sequenceIndex + eventId + optionIndex + effectIndex + salt +``` +Salt values vary by effect type (e.g., 101 for component shuffle, 211 for tower endurance damage). This ensures each random operation within an event option is independently reproducible. + +**F4 — Gold Effect (AddGold)** +``` +workingInventory.Gold = workingInventory.Gold + count +``` +No internal cap. The 9999 `MaxPlayerGold` cap is applied by `PlayerInventoryComponent` on commit, not by the Event System. + +## Edge Cases + +**EC1 — No Event Data Loaded** +If `GameEntry.DataTable.GetDataTable()` returns null on `OnInit()`, the component logs a warning and sets `_initialized = true`. Any subsequent `StartEvent()` call logs a warning and returns early without opening the form. + +**EC2 — No Events in Data Table** +If `_eventItems.Count <= 0` when `StartEvent()` is called, the same early-return warning path is taken. + +**EC3 — Requirements Met by Exact Count** +`CompCountAtLeast(count, rarity)` and `TowerCountAtLeast(count)` use `>=` comparison. A player with exactly `count` components/towers satisfies the requirement. + +**EC4 — RemoveRandomComps — Insufficient Candidates** +If `CollectLooseComponents()` returns fewer components than `removeCount`, `ApplyRemoveRandomComponentsEffect()` throws `InvalidOperationException`. 
This indicates a data-authoring error — the requirement check should prevent this path at runtime. + +**EC5 — AddRandomComps — No InventoryGenerationComponent** +If `GameEntry.InventoryGeneration == null` when `AddRandomComps` is applied, `ApplyAddRandomComponentsEffect()` throws `InvalidOperationException`. Event authors must not use `AddRandomComps` in an environment where `InventoryGenerationComponent` is absent. + +**EC6 — DamageRandomTowersEndurance — No Towers or Count ≤ 0** +`ApplyDamageRandomTowerEnduranceEffect()` silently returns if `towerCount <= 0`, `enduranceLoss <= 0`, or there are no assembled towers. No exception is thrown — the effect is simply a no-op. + +**EC7 — Probability = 0 (Guaranteed Failure)** +`RollProbability()` returns `false` immediately for `probability <= 0`. Cost effects are still applied. The player pays the cost but always receives no reward. + +**EC8 — Probability = 1 (Guaranteed Success)** +`RollProbability()` returns `true` immediately for `probability >= 1`. Reward effects are always applied. + +**EC9 — Gold Would Go Negative from AddGold Cost** +`ApplyAddGoldEffect()` throws `InvalidOperationException` if `workingInventory.Gold + count < 0`. This is prevented by the `GoldAtLeast` requirement on options that spend gold. + +**EC10 — Component Instance Not Found on Removal** +`RemoveComponentByInstanceId()` throws `InvalidOperationException` if the component instance is not found in the list. This should not occur given the CollectLooseComponents → Shuffle → Remove flow. + +**EC11 — Duplicate Option Indices** +If `TrySelectOption()` receives an out-of-range `optionIndex`, it returns `false` and logs a warning. The form remains open. 
+ +## Dependencies + +**Inherited from other systems:** + +| Entity | Value | Source | +|--------|-------|--------| +| `MaxPlayerGold` | 9999 | `design/gdd/shop.md` — applied on `PlayerInventoryComponent.ReplaceInventorySnapshot()` commit | + +**Upstream Dependencies:** + +| System | Status | Interface Contract | +|--------|--------|-------------------| +| Node System | Designed (`design/gdd/node-system.md`) | Fires `StartEvent(context)`, receives `NodeCompleteEventArgs` | +| PlayerInventoryComponent | Code only | `GetInventorySnapshot()` / `ReplaceInventorySnapshot()` | +| InventoryGenerationComponent | Code only | `BuildEventRewardComponents()` | +| DataTable (DREvent) | Code only | Event definitions with JSON `OptionsRaw` | + +**Downstream Dependents:** + +| System | Status | Interface Contract | +|--------|--------|-------------------| +| UI (EventForm) | Code only | `EventFormRawData`, `TrySelectOption()` | + +## Tuning Knobs + +**TK1 — Event Authoring (Data Table)** +All event tuning lives in `Assets/GameMain/DataTables/Event.txt`. Adding a new event requires a new DREvent row with a unique ID, title, description, and JSON option payloads. No code changes needed. + +Tunable per event: +- `probability` value per option (0.0–1.0) — changes success odds +- `costEffects` and `rewardEffects` JSON — changes what the option costs and rewards +- `requirements` JSON — changes entry threshold + +**TK2 — New Requirement Types** +Adding a new requirement type (e.g., `TowerLevelCountAtLeast`) requires: +1. New `EventRequirementType` enum value in `EventRequirementType.cs` +2. `EventRequirementFactory.Create()` branch +3. `IsRequirementSatisfied()` branch in `EventOptionExecutor` +4. `BuildBlockedReason()` branch +5. New JSON type in `Event.txt` + +**TK3 — New Effect Types** +Adding a new effect type (e.g., `ReduceGold`) requires: +1. New `EventEffectType` enum value in `EventEffectType.cs` +2. `EventEffectFactory.Create()` branch +3. 
`ApplyEffects()` switch branch in `EventOptionExecutor` +4. New JSON type in `Event.txt` + +**TK4 — Event Selection Variety** +The number of events in `DREvent` data table directly controls event variety. More events = more entropy in event selection per node. + +## Visual/Audio Requirements + +**V1 — Event Node Entry** +When `StartEvent()` is called, the Node System handles scene/camera transition and fires `NodeEnterEventArgs`. Event System itself has no standalone VFX. + +**V2 — Event Form Appearance** +When `EventForm` opens: +- Event title and description displayed prominently +- Options shown as cards with: option text, requirement status (Selectable / Blocked with reason) +- No VFX beyond standard UI hover/select feedback + +**V3 — Option Selection (Success Path)** +When `EventOptionExecutionResult.Accepted(isProbabilitySuccess=true)`: +- Standard UI close animation +- `NodeCompleteEventArgs` fires — Node System handles success feedback +- Inventory changes are silent (gold/component changes appear in the HUD on next frame) + +**V4 — Option Selection (Failure Path)** +When `EventOptionExecutionResult.Accepted(isProbabilitySuccess=false)`: +- Cost was deducted at execution time (gold already gone from working inventory) +- UI shows brief "失败" indicator before form closes (~0.5s) + +**V5 — Blocked Options** +Options with unmet requirements show blocked reason text (e.g., "需要至少 100 金币") in red/disabled style. No special audio. + +**V6 — Probabilistic Feel** +Probability values in `Event.txt` are designer-facing only. The UI must not reveal exact odds — events should feel like genuine gambles. The 70% and 30% options in 赌马 must not visually differ in probability signaling. 
+ +## UI Requirements + +**U1 — Event Form Layout** +- Single form: event title (large), description (medium), option list (vertical, scrollable if > 4 options) +- Each option card shows: option text, availability state (normal / grayed-out with reason) +- No explicit probability display on cards + +**U2 — Option Card Anatomy** +- Option text (primary label) — should imply the cost/action +- Availability state: "可选择" (normal) or blocked reason text (disabled) +- No price/cost field — the option text is the cost communication + +**U3 — Gold Display** +- Player's current gold visible in standard HUD during event +- Gold changes reflected in HUD after event closes + +**U4 — Accessibility** +- All option text readable by screen readers +- Blocked reason must use both color AND text label (not color alone) + +## Acceptance Criteria + +**AC1 — Event Selection Determinism** +- Given: a run with `runSeed=12345`, `sequenceIndex=3`, `nodeId=7` +- When: the player enters the Event node twice with the same context +- Then: the same event is selected both times + +**AC2 — Option Availability — Gold Requirement Not Met** +- Given: player has 50 gold, an option has requirement `GoldAtLeast(100)` +- When: `EventOptionExecutor.EvaluateOption()` is called +- Then: `EventOptionAvailability.IsSelectable == false` with reason "需要至少 100 金币" + +**AC3 — Option Availability — Gold Requirement Met** +- Given: player has 150 gold, an option has requirement `GoldAtLeast(100)` +- When: `EvaluateOption()` is called +- Then: `EventOptionAvailability.IsSelectable == true` + +**AC4 — Cost Effects Deducted on Selection** +- Given: player has 200 gold, selects the "下注 100 金币" option of 赌马 +- When: `Execute()` is called +- Then: `workingInventory.Gold == 100` after cost effects + +**AC5 — Reward Effects Applied on Success** +- Given: player has 200 gold, selects the 70% option, roll succeeds +- When: `Execute()` returns `EventOptionExecutionResult.Accepted(true)` +- Then: `workingInventory.Gold == 
250` (cost deducted + reward applied) + +**AC6 — Reward Effects Skipped on Failure** +- Given: player has 200 gold, selects the 70% option, roll fails +- When: `Execute()` returns `EventOptionExecutionResult.Accepted(false)` +- Then: `workingInventory.Gold == 100` (cost deducted, no reward) + +**AC7 — Probability Roll Reproducibility** +- Given: same context (runSeed, sequenceIndex, eventId, optionIndex), run twice +- When: `RollProbability()` is called both times +- Then: both calls return the same result + +**AC8 — RemoveRandomComps Requirement** +- Given: player has 2 loose white components, option has `CompCountAtLeast(2, White)` +- When: `EvaluateOption()` is called +- Then: `IsSelectable == true` + +**AC9 — Tower Damage Effect** +- Given: player has assembled towers, selects the "代价与回报" option +- When: `Execute()` completes +- Then: at least one tower's endurance is reduced by 20 + +**AC10 — Event Form Closes After Selection** +- Given: player selects any selectable option +- When: `TrySelectOption()` returns true +- Then: `GameEntry.UIRouter.CloseUI(UIFormType.EventForm)` is called + +## Open Questions + +**OQ1 — Event Frequency in Run** +How many Event nodes appear per run? The Node System GDD specifies 10 total nodes, but does not specify how many are Event nodes. If there are 0 event nodes per run, the Event System is unreachable dead code. + +**OQ2 — Player-Driven Event Avoidance** +Can the player choose to skip or avoid Event nodes? Currently there is no reroll or skip mechanic. In Slay the Spire, events are often optional (path away). Are Event nodes mandatory stops or optional detours? + +**OQ3 — Purely Narrative Events** +Current events all have mechanical trade-offs. Should purely narrative events exist (e.g., "a traveler tells you a story — nothing happens") for flavor, or should every event always have mechanical stakes? 
+ +**OQ4 — Player Influence Over Event Selection** +Currently event selection uses `runSeed + sequenceIndex + nodeId` — deterministic but player has no agency. An alternative: add a choice layer ("you see two merchants — choose one"). Is this desirable? + +**OQ5 — Event Component Rarity Budget** +`AddRandomComps` uses `InventoryGenerationComponent` with the same rarity budget as shop/inventory generation. Should event rewards have a separate rarity budget (e.g., events always drop one tier lower than shop)? diff --git a/design/gdd/gdd-cross-review-2026-04-29-v2.md b/design/gdd/gdd-cross-review-2026-04-29-v2.md new file mode 100644 index 0000000..d96de81 --- /dev/null +++ b/design/gdd/gdd-cross-review-2026-04-29-v2.md @@ -0,0 +1,207 @@ +# Cross-GDD Review Report + +> **Date**: 2026-04-29 +> **Reviewer**: Consistency Agent + Game Design Holism Agent +> **GDDs Reviewed**: 5 (node-system.md, shop.md, tower-assembly.md, event-system.md, progression.md) +> **Systems Covered**: Node System, Shop System, Tower Assembly, Event System, Progression + +--- + +## Consistency Issues + +### Blocking (must resolve before architecture) + +🔴 **[C1] Rule contradiction: "no partial rewards on loss" vs. Progression accepting gold on loss** +- **Documents**: `node-system.md` vs `progression.md` +- **Node System** (Edge Cases): "Any combat loss: Run ends in failure immediately. No partial rewards are awarded." +- **Progression** (SR2 + AC): `RecordRunEnd()` is called on every run end (win or loss). On loss: `totalRunsStarted++`, `totalGoldEarned += goldEarned`, `furthestNodeReached` updates, but NO unlock evaluation. +- **Contradiction**: "No partial rewards" implies gold is NOT awarded/recorded on loss. Progression explicitly expects `goldEarned` on loss runs. +- **Resolution needed**: Clarify the intended design. 
Two options: + - (A) Loss runs do NOT record gold in LifetimeStats → remove `totalGoldEarned += goldEarned` from loss path in Progression AC + - (B) Loss runs DO record gold stats (only unlocks are withheld) → Node System's "no partial rewards" must be clarified to mean "no unlocks" not "no gold tracking" + +--- + +🔴 **[C2] One-directional dependency: Tower Assembly ↔ Progression** +- **Documents**: `tower-assembly.md` vs `progression.md` +- **Tower Assembly** (Dependencies): "Progression — May read aggregate tower assembly stats across runs. Pending Progression GDD." +- **Progression** (Dependencies): Lists Node System and Shop System as upstream. Does NOT list Tower Assembly. +- **Impact**: If Tower Assembly wanted to persist aggregate stats (e.g., "total towers assembled lifetime"), no interface contract exists. Progression has no `RecordTowerAssemblyStats()` method. +- **Resolution needed**: Either (A) Progression adds Tower Assembly as a soft upstream dependency with a defined read interface, or (B) Tower Assembly's reference to Progression is removed/clarified as speculative. + +--- + +### Warnings + +⚠️ **[C3] Bidirectionality gap: Node System → Tower Assembly (soft vs. hard mismatch)** +- **Documents**: `node-system.md` vs `tower-assembly.md` +- Node System marks Tower Assembly as **Soft/provisional** in its Dependencies. +- Tower Assembly marks Node System as **Hard** upstream. +- This is a directionality mismatch — the softer designation should flow one way consistently. +- **Not blocking**: Tower Assembly's treatment of Node System as hard is likely correct; Node System's soft marking may be residual uncertainty from before Tower Assembly was designed. + +⚠️ **[C4] Shop System GDD status in Node System is stale** +- **Documents**: `node-system.md` (Open Questions #3) +- Node System marks Shop System GDD as "blocking — interface contract pending alignment." +- However, `shop.md` is now **Designed** (complete design exists). 
+- Node System's Open Question #3 flag is stale. +- **Resolution**: Node System Open Question #3 should be resolved now that Shop GDD exists. + +⚠️ **[C5] Tuning Knob ownership: MaxPlayerGold shared across 3 GDDs** +- **Documents**: `shop.md`, `event-system.md`, `progression.md` +- All three reference `MaxPlayerGold = 9999` consistently — no value conflict. +- However, no single GDD formally declares itself the **owner** of this constant. +- Entity registry correctly sources it from `shop.md`. +- **Recommendation**: Document in Shop GDD that it is the authoritative owner of `MaxPlayerGold`. + +--- + +## Game Design Issues + +### Blocking + +🔴 **[G1] Exponential boss HP vs. linear player power — indefinite gap creates a hard wall** +- **Documents**: `node-system.md` + `tower-assembly.md` +- `BossEffectiveHp = BaseHp × 2^(completedLoopCount)` — exponential in boss cycles survived +- Player tower stats: `statValue[i] = baseValue + perLevel × i` — linear, capped at 5 levels +- After ~5–7 boss cycles, boss HP doubles so fast that even max-rarity max-level towers cannot deal enough damage before the boss outscales +- The game becomes mathematically unbeatable at high loop counts regardless of build quality +- **This is not a difficulty curve — it is a hard wall** +- **Resolution needed**: Add a boss cycle cap, or add a player power catch-up mechanic (e.g., each boss cycle also grants a temporary attack buff, or boss HP scaling changes to logarithmic rather than exponential). Alternatively, redefine "VictoryType" to not involve looping, removing the exponential scaling trigger. 
+ +--- + +🔴 **[G2] Run End Screen cannot display gold earned this run — undefined data flow** +- **Documents**: `node-system.md` + `progression.md` +- Node System fires `RunEnd` → calls `Progression.RecordRunEnd(runStats)` +- Progression updates `LifetimeStats.totalGoldEarned += goldEarned` +- But `OnUnlockedEventArgs` carries only `UnlockResult[]` — NOT the run's gold amount +- Run End Victory screen should show "Gold earned this run" — but no interface provides this data +- **Resolution needed**: Either (A) `UnlockedEventArgs` carries a `goldEarnedThisRun` field, or (B) Node System provides gold data directly to Run End screen (bypass Progression), or (C) Run End screen queries `Progression.GetLastRunGold()` (add this method) + +--- + +🔴 **[G3] Undefined behavior: does RecordRunEnd fire on loss runs?** +- **Documents**: `node-system.md` vs `progression.md` +- Progression SR2: "`RecordRunEnd()` is called on every run end (win or loss)" +- Node System Edge Case: "Any combat loss: Run ends in failure immediately" — no mention of calling `RecordRunEnd` +- Progression AC explicitly covers loss runs (stats update, no unlocks) +- But if Node System never calls `RecordRunEnd` on loss, Progression's loss-path AC is unreachable +- **Resolution needed**: Clarify: does Node System call `RecordRunEnd` on loss? If yes — Node System GDD must document this. If no — Progression's loss-run AC is unreachable and must be revised. 
+ +--- + +### Warnings + +⚠️ **[G4] Unbounded gold accumulation — no sink between runs** +- **Documents**: `node-system.md` + `shop.md` +- Per full winning run: ~600 (combat) + 300 (boss level) + 200 (boss bonus) = ~1100 gold +- With `MaxPlayerGold = 9999`, a player needs ~9 wins to cap out +- Once at cap, excess gold from combat is discarded — additional survival is not rewarded +- No sink between runs: no repair, no permanent upgrades, no sacrifice mechanic +- **Risk**: Short optimized runs may be more rewarding than long successful ones; endgame economy becomes meaningless +- **Recommendation**: Consider a meta-gold sink (permanent repair station, unlock purchases, or cosmetic unlocks) to recycle late-game gold + +⚠️ **[G5] Multiple systems claim to be the primary progression loop** +- **Documents**: All 5 GDDs +- Node System: "tactician executing a plan" — navigates the run +- Tower Assembly: "puzzle solver, cleverness" — builds power +- Shop: "tactical urgency, deliberate investment" — resource decisions +- Event: "narrative surprise, meaningful stakes" — variety +- Progression: "relentless collection" — meta motivation +- **Risk**: Without a declared primary loop, players optimize the wrong thing +- **Recommendation**: Explicitly designate Tower Assembly as the core tactical loop; Node System as the structural frame; Shop/Event as the decision points; Progression as the meta-reward + +⚠️ **[G6] Component Tag stacking — potential dominant strategy** +- **Documents**: `tower-assembly.md` +- R7: No compatibility constraints between component types +- Tags aggregate with stack counts merging across 3 components +- Optimal strategy: collect 3 copies of the best tag, stack it +- **Risk**: The "puzzle" of tower assembly may have a trivially discoverable dominant solution +- **Recommendation**: Consider adding anti-synergy rules (e.g., same tag on 3 components reduces effectiveness) or explicit trade-offs to maintain build diversity + +⚠️ **[G7] Lossy economy (sell 
~50% of buy) discourages experimentation** +- **Documents**: `shop.md` +- Sell price = midpoint of `[MinPrice, MaxPrice]` ≈ 50% of average buy +- Repeated buy-sell cycles destroy gold +- Players hoard components rather than experiment +- **Risk**: Reduces strategic depth; players avoid build diversity +- **Recommendation**: Consider a higher sell ratio (e.g., 60–70%) or a component enhancement sink to make exploration more affordable + +⚠️ **[G8] Event rewards use same rarity budget as shop — no distinct reward tier** +- **Documents**: `event-system.md` + `shop.md` (OQ5) +- `AddRandomComps` uses `InventoryGenerationComponent` with identical rarity budget to shop +- Events offer no higher-tier rewards than shop can provide +- **Risk**: Events feel mathematically equivalent to additional shop visits; "genuine dilemma" fantasy may be undermined +- **Recommendation**: Consider a separate event rarity budget (e.g., events drop 1 tier lower than equivalent shop purchase) to give events a distinct identity + +⚠️ **[G9] Assembly Phase has 5 simultaneous information panels** +- **Documents**: `tower-assembly.md` +- During Assembly Phase: Inventory Grid + Tower Slots + Assembled Towers Panel + Combat Roster + Next Node Preview +- **Risk**: Cognitive overload for new players +- **Recommendation**: Consider a tabbed or sequenced interface rather than all panels visible simultaneously + +--- + +## Cross-System Scenario Issues + +**Scenario: Player completes a winning run — full RunEnd chain** + +### Steps: +1. **Node System**: Combat victory at Boss node → fires `NodeCompleteEventArgs` with `CombatWon=true` +2. **Node System**: Calls `Progression.RecordRunEnd(runStats)` with `{goldEarned, nodesCompleted=10, bossDefeated=true, ...}` +3. **Progression**: Updates `LifetimeStats` → evaluates unlocks → fires `UnlockedEventArgs` with `UnlockResult[]` +4. 
**UI / Run End Screen**: Receives `OnUnlockedEventArgs` → shows toast for new unlocks + +### Issues found: + +🔴 **G2 (already flagged above)**: Run End screen cannot display gold earned this run +- Step 3: `UnlockedEventArgs` carries no `goldEarned` field +- Run End Victory screen should show gold earned — no data path defined + +🔴 **G3 (already flagged above)**: Ambiguous whether loss runs call `RecordRunEnd` +- If loss path does not call `RecordRunEnd`, steps 2–3 never occur on loss + +⚠️ **Scenario: Player reaches Boss but loses** +- Node System fires `RunEnd` with `bossDefeated=false` +- Does `RecordRunEnd` fire? (G3 ambiguity) +- If yes: LifetimeStats records `furthestNodeReached=9`, no unlocks +- If no: Partial run progress is lost, player gets no credit for reaching node 9 + +⚠️ **Scenario: Multiple unlocks fire simultaneously** +- Step 3: `UnlockResult[]` can contain multiple items +- Step 4: Toast popup shows all — good +- But no priority ordering if unlocks are from different categories (e.g., Difficulty + Theme simultaneously) +- **Info**: Not a blocker; worth documenting expected ordering + +--- + +## GDDs Flagged for Revision + +| GDD | Reason | Type | Priority | +|-----|--------|------|----------| +| `node-system.md` | C1/C3/C4: Stale Open Question #3, ambiguous loss behavior, "no partial rewards" contradicts Progression's loss-path AC | Consistency + Design Theory | **High** | +| `progression.md` | C1: Loss-run gold recording contradicts Node System's "no partial rewards"; G3: loss run behavior undefined | Consistency + Design Theory | **High** | +| `tower-assembly.md` | C2: Progression dependency is orphaned (one-directional); G6: Tag stacking dominant strategy risk | Consistency + Design Theory | Medium | + +--- + +## Verdict: **CONCERNS** + +Three blocking issues must be resolved before architecture begins: +1. **C1**: "No partial rewards" vs. Progression loss-path gold — design decision required +2. **G1**: Exponential boss HP vs. 
linear player power — hard wall makes boss unbeatable after ~cycle 5–7 +3. **G2/G3**: Undefined data flow for Run End screen gold display + ambiguous loss run behavior + +Warnings (G4–G9) are advisory and should be addressed before implementation but do not block architecture. + +--- + +## Recommended Actions + +1. **C1**: Decide: do loss runs record gold in LifetimeStats? Update both Node System and Progression GDDs accordingly +2. **G1**: Redesign boss scaling formula — change from exponential to logarithmic, or add player catch-up mechanic, or cap boss cycles +3. **G2**: Add `goldEarnedThisRun` to `UnlockedEventArgs`, OR have Node System provide gold directly to Run End screen +4. **G3**: Clarify whether Node System calls `RecordRunEnd` on loss. Update Progression SR2 and AC to match. +5. **C2**: Either add Tower Assembly as soft upstream to Progression (with interface), or remove the speculative reference from Tower Assembly GDD +6. **C4**: Resolve Node System Open Question #3 now that Shop GDD is complete diff --git a/design/gdd/gdd-cross-review-2026-04-29.md b/design/gdd/gdd-cross-review-2026-04-29.md new file mode 100644 index 0000000..a9ae10b --- /dev/null +++ b/design/gdd/gdd-cross-review-2026-04-29.md @@ -0,0 +1,189 @@ +# Cross-GDD Review Report + +**Date:** 2026/04/29 +**GDDs Reviewed:** 2 +**Systems Covered:** Node System, Tower Assembly + +--- + +## 一致性问题 (Consistency Issues) + +### 警告级 (Warnings) + +#### ⚠️ `TryDisassembleTower` 未实现但 UI 可调用 +- **GDD**: `tower-assembly.md` Open Question #1 +- **问题**: `node-system.md` R3 规定"组装后可以免费拆解",Assembly Phase UI 也提供 Disassemble 选项。但 `tower-assembly.md` 明确标注 `TryDisassembleTower` 方法不存在。玩家看到可点击的 Disassemble 按钮但无法使用,属于误导性 UI。 +- **建议**: `tower-assembly.md` 应将 Open Question #1 从 "OPEN" 改为标注为 "Blocking — GDD 自洽性受损",直至方法实现 + +--- + +## 游戏设计问题 (Game Design Issues) + +### 阻塞级 (Blocking) + +#### 🔴 Boss 难度曲线可能无法追赶 — 威胁两大系统核心幻想 + +**位置**: `node-system.md` Boss Difficulty Scaling 公式;`tower-assembly.md` Stat Scaling + 
+**问题描述**: +- Boss HP 增长: `BossEffectiveHp = DRLevel.BaseHp × 2^(completedLoopCount)` — 指数增长 +- 玩家战力增长: Tower Assembly 提供线性成长(每场战斗最多 8 次升级事件,每次 `baseValue + perLevel * i`) +- 在某个 loop 阈值后,玩家数学上无法击败 Boss + +**设计后果**: +- `node-system.md` Player Fantasy: "whether I survive **depends on** how I prepare" — 若准备永远不够,此幻想崩塌 +- `tower-assembly.md` Player Fantasy: "arrange pieces to solve it" — 若 puzzle 解不开,此幻想崩塌 +- 两大系统的核心幻想共享同一个前提:玩家准备应当是充分的 + +**未知依赖**: `DRMuzzleComp`、`DRBearingComp`、`DRBaseComp` 数据表值未知,无法验证实际成长率 + +**设计建议**: +1. 在 Boss 战中引入 catch-up 机制(每 loop 玩家获得临时增益) +2. 让 Tower Assembly 存在超线性成长路径(tag synergy、combo 机制) +3. 降低 Boss 的指数系数(从 ×2 改为 ×1.3~1.5) +4. 在 `node-system.md` 中明确说明"Boss 的设计意图是部分玩家无法一次通关"以调整预期 + +--- + +#### 🔴 无 Gold 消耗强制机制 — 经济回路开放无界 + +**位置**: `node-system.md` 经济表格(1100 gold 上限);`tower-assembly.md` 无 gold sink 定义 + +**问题描述**: +- 6 场 Combat 节点共 600 gold,Boss 胜利额外 500 gold,最大总量 1100 gold/run +- Shop Node 4 和 8 奖励 0 gold,不强制消费 +- 无任何机制(修理费、强制升级、门票等)要求消耗 gold +- **Shop System GDD(待编写)必须定义强制消耗场景**,否则 Shop 节点沦为 UI 空操作 + +**风险**: Shop 节点存在但内容空洞,玩家跳过购买不打 Boss —— 浪费了 2 个节点的设计价值 + +--- + +### 警告级 (Warnings) + +#### ⚠️ 塔组装存在显然的最优策略 + +**位置**: `tower-assembly.md` R7(无组件兼容性约束) + +**问题**: 任意 Muzzle+Bearing+Base 组合均可。Rarity Resolution 取算术均值: +- Red(5)+Red(5)+Red(5) → 均值 5.0 → Red Tower +- Red(5)+Green(2)+White(1) → 均值 2.67 → 向下取整 → Green Tower(比单 Red 差) + +**结论**: 贪婪地堆最高稀有度组件永远是最优策略,无任何权衡代价。塔组装的最优解是排序后取 top 3,无需战术思考。 + +**建议**: 引入稀有度以外的优化维度: +- Tag synergy 机制(Fire+Ice 组合产生额外效果) +- Endurance 配平(3 高稀有度 = 3 高损耗率,混合可延长功能性) +- Roster slot 约束下的边际收益(分散稀有度可在 4 槽位限制下覆盖更多敌人类型) + +--- + +#### ⚠️ 节点选择若两条路径难度不同,最优路径显而易见 + +**位置**: `node-system.md` 节点选择流程 + +**问题**: 每 junction 显示 2 个目的节点类型。若两条路径在难度/奖励上存在差异(比如两 Combat 但一个是 L1 一个是 L3),"战术决策"退化为"比较两个数字"。 + +**建议**: 确保两条分支在难度和奖励上等价,或引入隐藏变量(敌人波次、特殊条件)使玩家无法在选择前判断哪个更有利。 + +--- + +#### ⚠️ 玩家注意力预算已达上限,无扩展空间 + +**当前 Assembly Phase + Node Choice 时活跃系统**: 4 个 +1. Tower Assembly 决策(选 3 组件组合) +2. Roster 管理(4 槽位分配) +3. 下个节点威胁评估(已知类型 + 难度) +4. 
节点二选一(强制决策) + +**设计阈值**: 技能定义 >4 时触发警告,当前 = 4(刚好达标) + +**风险**: Event System(待设计)若在 Assembly Phase 窗口引入额外主动决策(如事件影响当前备战),预算立即超标。 + +--- + +#### ⚠️ Pillar 轻微漂移 + +| GDD | Pillar 声明 | 实际行为 | 漂移程度 | +|-----|------------|---------|---------| +| `node-system.md` | "players drive their own path" | 节点类型序列固定(Plain 主题下所有玩家路径相同);Junction 处只选顺序不选类型 | 轻微 — "path" 实为"choice order" | +| `tower-assembly.md` | "adaptation to known threats" | Shop/Event 节点无需塔组装决策;"adaptation" 对 30% 节点不适用 | 轻微 — pillar 覆盖部分场景 | + +--- + +## 跨系统场景问题 (Cross-System Scenario Issues) + +**场景走过**: 3 个 + +--- + +### 🔴 场景1: 战斗结束 → 组件掉落 → 塔组装(数据流歧义) + +**涉及系统**: Combat → Inventory → Tower Assembly + +**问题描述**: +Combat 节点完成后,组件掉落通过 `InventoryComponent.AddItem` 进入 inventory。**接口未定义**: +- 掉落的是全新组件实例(InstanceId 新生成)? +- 还是对现有组件的修改(如 `IsAssembledIntoTower` 标记改变)? + +**隐患**: 若为后者,而玩家在 Assembly Phase 前一步已组装了某些塔,战损标记可能与已组装组件的状态产生歧义。 + +**需确认**: `InventoryComponent.AddItem` 的精确语义,建议在 `node-system.md` Dependencies 或接口协议中明确定义。 + +--- + +### ⚠️ 场景2: Boss 前夕的 Assembly Phase(难度曲线验证) + +**涉及系统**: Node System + Tower Assembly + Combat + +**问题描述**: +玩家在 Node 9 完成后进入 Assembly Phase,此时 `BossEffectiveHp` 是可计算的(因 `completedLoopCount` 已知)。玩家可以提前计算胜率。 + +**风险**: +- 若计算结果为正 → Boss 战是仪式感结局,fantasy 得到验证 +- 若计算结果为负 → 玩家感受到"数值碾压"而非"战术压力",两大系统的核心幻想同时受损 + +**需验证**: +- `BossEffectiveHp` 是否对玩家 UI 可见(建议:仅在 Boss 血条上显示,不显示计算公式) +- 实际 tower DPT(每秒伤害)是否能与指数增长的 Boss HP 达到动态平衡 + +--- + +### ℹ️ 场景3: Degraded 塔在战斗开始时的处理 + +**涉及系统**: Tower Assembly + Combat + +**问题描述**: +`tower-assembly.md` 明确:"mid-combat 降级不会自动从 roster 移除"。但 `CombatNodeComponent` 的 roster 验证逻辑未定义。 + +**两种处理方式的后果**: +- 若验证在战斗**开始**时:degraded 塔被筛除,玩家以 <4 塔对抗预期敌人规模 +- 若验证在战斗**进行中**:塔在战斗中失效,战斗难度非线性上升 + +**建议**: 在 `tower-assembly.md` Acceptance Criteria 中补充:`CombatNodeComponent` 必须在战斗开始前调用 `CombatParticipantTowerValidationService.ValidateParticipantTowers`,拒绝 degraded 塔入战。 + +--- + +## GDD 标记需修订 + +| GDD | 原因 | 类型 | 优先级 | +|-----|------|------|--------| +| `tower-assembly.md` | 
`TryDisassembleTower` 未实现但 UI 可调用;Open Question #1 应更新状态为 Blocking | 设计完整性 | Warning | +| `node-system.md` | Boss catchability 需在 GDD 中明确设计意图;建议补充"战斗开始前 roster 验证逻辑"的跨系统约定 | 难度曲线 | Warning | +| `systems-index.md` | Combat System 标注"Designed"但 GDD 在 `docs/CombatNodeArchitecture.md` 而非 `design/gdd/`,文档位置不统一 | 文档管理 | Warning | + +--- + +## Verdict: **CONCERNS** + +存在 2 个阻塞级问题(Boss catchability + 无 gold sink)和多个警告级问题。阻塞问题不会阻止架构设计,但应在 `/create-architecture` 前明确设计意图。 + +--- + +## Next Steps + +- `/design-system shop` — 编写 Shop System GDD(阻塞项:gold sink 定义) +- `/design-system event` — 编写 Event System GDD(阻塞项:EventContext 合约) +- `/design-system progression` — 编写 Progression GDD(阻塞项:RunEnd 数据持久化) +- `/design-review tower-assembly` — 修订 Open Question #1 并更新 TryDisassembleTower 状态 +- `/create-architecture` — 在所有 MVP GDD 完成后开始架构设计(Verdict 为 CONCERNS 但非 FAIL) diff --git a/design/gdd/node-system.md b/design/gdd/node-system.md new file mode 100644 index 0000000..16a9c71 --- /dev/null +++ b/design/gdd/node-system.md @@ -0,0 +1,420 @@ +# Node System (节点系统) + +> **Status**: Revised — post-design-review fixes applied (edge divergence, economy, Boss scaling, Assembly Phase entry, Boss color, Coin sink, AC gaps) +> **Author**: SepComet + agents +> **Last Updated**: 2026-04-30 (post-review revisions: edge level variants, first-shop tiering, Boss nodesCompleted scaling, Assy Phase auto-enter, Boss VFX color, Coin sink clarification, AC gaps filled) +> **Implements Pillar**: Core game loop navigation — players drive their own path through the run + +## Overview + +The Node System is the **run-level navigation layer** that structures a complete playthrough. A run consists of exactly 10 sequential nodes. The player advances through nodes one at a time, choosing which available node to tackle next. After each node resolves, the player enters a brief **assembly phase** to reconfigure their towers before committing to the next node. 
+ +**Node types**: +- **Combat Node**: Triggers a wave-based tower defense battle via `CombatNodeComponent`. The player earns Gold (persistent run currency) and component drops based on performance. +- **Event Node**: Presents a branching choice with risk/reward outcomes. No combat; purely decision-based. +- **Shop Node**: Opens the component store for purchasing upgrades between battles. +- **Boss Node** (Node 10): A combat node with higher difficulty and guaranteed valuable drops — the run's climax. + +**Currency note**: This GDD distinguishes two currencies. **Gold** is the persistent run-level currency earned from combat nodes and spent at Shop nodes. **Coin** is a combat-internal currency earned per combat round and spent within a single combat encounter on tower building and other intra-combat actions — Coin does not persist between nodes and is exclusive to the CombatNode domain. The Coin sink is defined in the CombatNode design; Coin has no interaction with Gold or the shop system. + +**Node graph structure**: +The node graph is a **linear track with one branch per node**. At each node entry, the player is shown the available outgoing edge(s) and must choose which node to enter next. There is no convergence/merging of paths within a run — the player advances linearly, not across branching tracks. The two edges at each junction lead to the **same node type** but offer **different level variants** (distinct map layouts, enemy compositions, or environmental conditions) — creating strategic divergence through topology and threat profile rather than through node-type variety. + +**Node type generation**: Node types follow a **fixed sequence** (not randomized) per run. The sequence for the default (Plain theme) is: Combat → Combat → Combat → Shop → Combat → Event → Combat → Shop → Combat → BossCombat. Events are restricted to positions 4–8 only. Future themes may define their own fixed sequences. Players cannot choose or reroll node types. 
The two outgoing edges from each node lead to distinct level variants of the next node type, not to different node types. + +## Player Fantasy + +**"I feel like a tactician executing a plan through hostile terrain — the route is set, but how I prepare and when I commit my forces determines whether I survive."** + +The Node System delivers the fantasy of **tactical navigation under pressure**. The player is a tactician with a fixed route ahead — they know what kinds of challenges await (the full node track and boss are visible at run start), but at each junction they choose which of two paths to commit to. The core feeling is **the weight of tactical commitment**: selecting a path means locking in your approach. You can see the Boss at Node 10 glowing at the end, and you know the full track from the start — but the question is whether the resources and tower builds you've chosen will be sufficient to reach it intact. + +The player should feel: +- **Preparing and adapting** — using Assembly Phases to optimize tower builds based on known upcoming challenges; choosing path segments that complement the components in hand +- **The weight of commitment** — once you enter a node, the choice is locked; there's no undoing or backtracking within a run +- **Building toward a climax** — each node brings the player closer to the Boss; the 10-node arc creates mounting tension toward the run's inevitable crescendo +- **Satisfaction when the plan holds** — the run "reads" as a coherent story in retrospect: "I invested heavily in early towers, conserved resources mid-run, and deployed my best assembly for the Boss" + +**Reference**: Into the Breach's "visible consequences of choices" feeling — the player can see what lies ahead (both edge destinations and their level variants) and must prepare accordingly. 
The Geometry TD node system achieves this through the fixed Boss at Node 10, the linear-but-choiced track structure where the two outgoing edges present different level variants of the next node, and the Assembly Phase where the player configures their towers for the known upcoming challenge. Resource timing and build optimization matter more than node-type gambling — but the specific level variant encountered is also shaped by the player's path choice. + +## Detailed Design + +### Core Rules + +**Run Structure** +1. A run consists of exactly **10 sequential nodes**. Node 10 is always a Boss Combat node. +2. Node types follow a **fixed sequence** per theme (see Node Type Generation above). The player cannot choose or reroll node types. +3. The run graph is **strictly forward-only**: no backtracking, no skipping nodes, no retries of completed nodes. + +**Node Entry Flow** +4. Player arrives at a node. The node resolves based on type: + - **Combat / Boss**: `CombatNodeComponent.StartCombat()` is called by the Procedure layer. On victory, player receives Coin, component drops, and Gold. + - **Event**: Branching choice presented. Player selects an option; risk/reward resolves immediately. + - **Shop**: Shop interface opens. Player buys/sells components. Player exits freely. +5. Node resolves. **Assembly Phase is automatically entered** after every node resolves. Player interaction within it is optional — the "Ready" button can be clicked immediately to proceed, or the player may re-enter freely before selecting the next node. Assembly Phase is never skipped; it is a mandatory transit point, not a mandatory modification point. +6. **Assembly Phase**: Player can swap any component on any tower, reorganize inventory, and review current stats. Player confirms "Ready" to proceed to node choice, or may re-enter Assembly Phase freely before selecting the next node. +7. Assembly Phase ends (player-initiated or via "Ready" confirmation). 
The **2 outgoing edge destinations** from the completed node are revealed (node types shown). +8. Player selects one destination. The chosen edge is locked in. Player travels to the next node. +9. Repeat steps 4–8 until Node 10 (Boss) is reached. + +**Combat Loss Rules** +10. **Any combat loss**: Run ends in failure immediately. There is no continuation after a combat loss — the run concludes at the point of failure. This applies to both regular Combat nodes and the Boss node. Node is marked as `RunNodeStatus.Exception` in the run state on loss. + +**Data Persistence** +11. Within a run: Inventory, Repository, Gold, Coin, Tower configs, visited node history, and active buffs/debuffs **persist across nodes**. +12. Between runs: All of the above **reset to starting values**. Only permanent meta-progression (unlocks, permanent upgrades) persists. + +**NodeComponent — Ownership and Interface** +13. There is no `NodeComponent` class. Run-level orchestration is handled by the Procedure layer via `RunStateAdvanceService` and `RunState`. +14. The Procedure layer calls `CombatNodeComponent.StartCombat()` only after the Assembly Phase is confirmed complete. +15. `CombatNodeComponent` fires `NodeCompleteEventArgs` (with `CombatWon` field) after combat resolves. The Procedure layer receives this event and drives state transitions via `RunStateAdvanceService.TryCompleteCurrentNode`. + +### States and Transitions + +| State | Description | Exits | +|-------|-------------|-------| +| `RunIdle` | Pre-run, at main menu. Player not yet in a run. | → `NodeReveal` on "Start Run" | +| `NodeReveal` | Outgoing edges from current node are displayed. Player makes a choice. | → `NodeTransition` on choice confirmed | +| `NodeTransition` | Player travels to the chosen destination node. | → `NodeEntry` | +| `NodeEntry` | Player arrives at the node. Node-type logic triggers (Combat/Event/Shop/Boss). | → `AssemblyPhase` on node resolved | +| `AssemblyPhase` | Full tower assembly enabled.
Player may enter/exit freely. Player confirms "Ready" to proceed to node choice. | → `NodeReveal` (next node) or `RunEnd` (Boss completed) | +| `RunEnd` | Victory or failure screen. Stats recorded. Return to main menu. | → `RunIdle` | + +*Note: `CombatNodeComponent` manages its own internal `Loading → RunningPhase → ... → Settlement` state machine (per CombatNodeArchitecture.md). From the Node System's perspective, a Combat node entry is a single atomic transition: `NodeEntry → AssemblyPhase` on receiving the `CombatVictory` or `CombatDefeat` event.* + +### Interactions with Other Systems + +| System | Direction | Interface | +|--------|-----------|------------| +| **CombatNodeComponent** | Delegates to | `StartCombat(CombatData)`, receives `OnCombatVictory / OnCombatDefeat` events | +| **ShopSystem** | Delegates to | `ShopNodeComponent.StartShop()`, receives `OnShopClosed` callback | +| **EventSystem** | Delegates to | `EventNodeComponent.ProcessEvent()`, receives `OnEventResolved` | +| **TowerAssembly** | Reads/writes | Tower config persists in `RunState` (owned by the Procedure layer); Assembly Phase reads current inventory | +| **Inventory** | Reads | Component drops from combat are added to inventory via `PlayerInventoryComponent.MergeInventory` | +| **MapEntity / MapTopologyService** | Reads | Combat nodes query `MapTopologyService` for path data to pass to `CombatNodeComponent` via `MapData` | +| **Progression** | Writes | On `RunEnd`, final stats (Gold, nodes completed, Boss killed) are written to Progression | + +``` +RunState (data container, owned by Procedure layer) +├── RunStateAdvanceService (state transition logic) +├── CombatNodeComponent (delegates combat entry) +├── ShopNodeComponent (opens Shop UI on Shop node) +├── EventNodeComponent (processes Event node choices) +└── TowerAssembly (read/written during Assembly Phase) +``` + +Note: There is no `NodeComponent` class. Orchestration is handled by the Procedure layer.
+ +## Formulas + +**Important note on `completedLoopCount`**: This refers to the **number of completed combat cycles within a single Boss node encounter** — i.e., when a Boss fight loops (e.g., VictoryType requires surviving N rounds), each completed cycle increments the count. This is independent of the run's node count. The formula does NOT use `nodesCompleted` from the run state. + +Gold earned per node completed, plus boss bonus: + +`TotalGold = Σ DRLevel.RewardGold(CompletedCombatNodes) + (HasDefeatedBoss ? BossBonus : 0)` + +Each Combat node's gold reward is determined by its linked level's `DRLevel.RewardGold` value. Event and Shop nodes do not award gold directly. The Boss node's own reward comes from its linked level (`DRLevel.RewardGold` at level index for Boss) plus the `BossBonus`. + +| Variable | Symbol | Type | Range | Description | +|----------|--------|------|-------|-------------| +| Combat nodes cleared | n | int | 0–6 | Number of non-boss combat nodes cleared (nodes 1, 2, 3, 5, 7, 9). Event and Shop nodes award 0 gold and are excluded from the sum. | +| Boss bonus | BossBonus | int | 200 | Flat bonus for defeating Boss (applied only if `HasDefeatedBoss = true`) | + +**Per-node gold (REVISED — economy rebalanced):** +After rebalancing, the illustrative values for the Plain theme sequence (Combat at L1, L2, L3, L1; Boss at L4) are: + +| Node | Type | Level | Gold (illustrative) | +|------|------|-------|-------------------| +| 1 | Combat | Level 1 | 90 | +| 2 | Combat | Level 2 | 90 | +| 3 | Combat | Level 3 | 120 | +| 4 | Shop | — | 0 | +| 5 | Combat | Level 1 | 90 | +| 6 | Event | — | 0 | +| 7 | Combat | Level 2 | 90 | +| 8 | Shop | — | 0 | +| 9 | Combat | Level 3 | 120 | +| 10 | BossCombat | Level 4 | 300 + BossBonus(200) | + +**Output Range:** 0 to 1100 (6×Combat=600 + BossBonus=200 + BossLevelGold=300) depending on how far the player progressed and whether Boss was defeated. 
+**Constraint:** Plain theme places exactly one Event node at position 6. Events may only occupy positions 4–8. Combat nodes 1, 2, 3, 5, 7, 9 use the Plain cycle L1, L2, L3, L1, L2, L3. **Shop tiering**: The first shop (Node 4) offers only White and Green rarity components; Blue and above appear only from Node 8 onward. + +--- + +### 2. Boss Difficulty Scaling + +Boss difficulty scales with both the Boss node's own loop/round count **and** the number of non-boss nodes the player has completed in the run. + +`BossEffectiveHp = DRLevel.BaseHp × 2^(completedLoopCount) × (1 + 0.1 × nodesCompleted)` + +| Variable | Symbol | Type | Range | Description | +|----------|--------|------|-------|-------------| +| Boss base HP | DRLevel.BaseHp | int | ≥ 1 | Fixed HP from level config (note: `DRLevel` only has `BaseHp`, not a separate `BossBaseHp` field). Floor of 1 applied at data load time. | +| Completed loop count | completedLoopCount | int | 0–31 | Number of **combat rounds/cycles completed within the current Boss encounter** — NOT the count of nodes completed in the run. When a Boss fight loops (e.g., VictoryType requires surviving N rounds), each completed cycle increments the count. Resets when a new Boss fight begins. Hard cap of 31 loops before `BossEffectiveHp` would reach `int.MaxValue`; implementation clamps at `int.MaxValue`. | +| Nodes completed | nodesCompleted | int | 0–9 | Number of non-boss nodes (nodes 1–9, including Shop and Event nodes) successfully completed this run. This range (0–9) matches the multiplier cap of 1.9× below; note it intentionally counts all non-boss nodes, unlike the gold formula's `n`, which counts combat nodes only. Resets each run. | +| Difficulty multiplier | (1 + 0.1 × nodesCompleted) | float | 1.0–1.9 | Run-progress multiplier. More nodes completed → harder Boss. Caps naturally at 1.9× (at 9 nodes completed). Does not exceed 2.0×.
| +| Boss effective HP | BossEffectiveHp | int | ≥ 1 | Final boss HP | + +**Constraint:** Players who lost early AND players who breezed through will face different Boss HP values at the same loop count — the run-progress multiplier differentiates them. A player with `nodesCompleted=3` and `completedLoopCount=0` faces `BaseHp × 1.3`; a player with `nodesCompleted=9` and `completedLoopCount=0` faces `BaseHp × 1.9` at the same level. +**Note:** Formula extends `EnemyConfigProvider.ResolveScaledEnemyBaseHp` with a run-progress factor. The `DRLevel` fields used are: `Id`, `LevelThemeType`, `BaseHp`, `StartCoin`, `VictoryType`, `VictoryParam`, `RewardGold`. + +## Edge Cases + +- **If Player loses Combat at any node**: Run ends in failure immediately. No partial rewards are awarded. The run is complete at the point of failure. + +- **If Player has 0 components entering Assembly Phase**: Player cannot assemble or modify any towers. Assembly phase is effectively a no-op pass-through. Player proceeds to the next node with existing tower state unchanged. + +- **If Player encounters two consecutive Shop nodes**: Player has back-to-back purchase opportunities. If Gold is insufficient at Shop 1, no mitigation occurs — Shop 2 may also be unaffordable. No rule forces spending or guarantees affordability. + +- **If Player encounters Shop Node 4 (first shop)**: Only White and Green rarity components are available. Blue and higher rarities are excluded from this shop. If the player's gold is insufficient for all available items, no additional items appear — the player may proceed with insufficient purchases. + +- **If Player encounters Shop Node 8 (second shop)**: All rarity tiers (White through Red) are available. There is no tier restriction on the second shop. + +- **If Player loses at Boss node**: Run ends in failure immediately with **no partial rewards**. The Boss node awards no rewards on loss. 
+ +## Dependencies + +### Upstream Dependencies (what Node System depends on) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **CombatNodeComponent** | Hard | Fires `NodeCompleteEventArgs` with `CombatWon` field after combat resolves. Calls to `CombatNodeComponent.StartCombat()` enter combat. | Implemented (`Assets\GameMain\Scripts\CustomComponent\CombatNode\CombatNodeComponent.cs`) | +| **ShopSystem** | Hard | Calls `ShopNodeComponent.StartShop()`; receives `OnShopClosed` callback. | Designed (`design/gdd/shop.md`). `ShopContext` contract and buy/sell behavior are defined and consistent with Assembly Phase timing. | +| **EventSystem** | Hard | Calls `EventNodeComponent.ProcessEvent()`; receives `OnEventResolved`. | Designed (`design/gdd/event-system.md`). `EventContext` contract and risk/reward resolution flow are defined and consistent with `NodeEntry → AssemblyPhase` atomic transition model. | +| **TowerAssembly** | Soft | Reads/writes tower configs during Assembly Phase. Tower state persists in `NodeComponent` run context. | Designed (`design/gdd/tower-assembly.md`). `TryAssembleTower()` and `TryDisassembleTower()` interfaces are defined; Assembly Phase timing and inventory access patterns are consistent with this GDD. | +| **Inventory** | Soft | Reads component drops from combat; adds items via `PlayerInventoryComponent.MergeInventory`. | Implemented | +| **MapEntity / MapTopologyService** | Hard | Reads path/topology data for combat map setup; assembles `MapData` passed to `CombatNodeComponent`. | Implemented (see `Assets\GameMain\Scripts\CustomComponent\Map\`) | +| **Progression** | Soft | Writes final run stats (Gold, nodes completed, Boss killed) on `RunEnd`. | Designed (`design/gdd/progression.md`). `RecordRunEnd()` interface is defined; all acceptance criteria involving `Progression` are now implementable. 
| + +### Downstream Dependents (what depends on Node System) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Progression** | Hard | Reads run completion data (Gold, nodes cleared, Boss defeat) from `RunState` on `RunEnd`. | Designed (`design/gdd/progression.md`). Interface contract is defined. | + +### Bidirectional Consistency Check +- [x] `CombatNodeComponent` → listed as upstream (Node System receives its events) ✅ +- [x] `Progression` → downstream only; Node System writes to it ✅ +- [x] `ShopSystem` → GDD exists; interface contract aligned ✅ +- [x] `EventSystem` → GDD exists; interface contract aligned ✅ +- [x] `TowerAssembly` → GDD exists; interface contract defined ✅ + +### Provisional Assumptions +- `ShopSystem` receives a `ShopContext` from the orchestrating component containing current Gold/Coin and run node index +- `EventSystem` receives an `EventContext` containing run state (node index) +- `TowerAssembly` is called during Assembly Phase and writes back to `RunState`'s inventory snapshot +- There is no `NodeComponent` — orchestration is handled by the Procedure layer via `RunStateAdvanceService` + +## Tuning Knobs + +All designer-adjustable values for the Node System. Changing these does not require code changes. 
+ +### Run Structure + +| Knob | Default | Safe Range | Extreme: Too Low | Extreme: Too High | +|------|---------|-----------|-----------------|------------------| +| `TotalNodesPerRun` | 10 | 5–20 | Run feels too short; boss arrives too quickly | Run feels repetitive; pacing drags | +| `BossNodeIndex` | 10 | = `TotalNodesPerRun` | N/A | N/A | +| `OutgoingEdgesPerNode` | 2 | 2–3 | Fewer choices reduces strategic depth | More choices may overwhelm UI/decision-making | + +### Boss Scaling + +| Knob | Default | Safe Range | Extreme: Too Low | Extreme: Too High | +|------|---------|-----------|-----------------|------------------| +| `BossBonusGold` | 200 | 100–500 | Boss reward feels trivial | Boss trivializes economy | + +### Assembly Phase + +| Knob | Default | Safe Range | Extreme: Too Low | Extreme: Too High | +|------|---------|-----------|-----------------|------------------| +| `AssemblyPhaseIsMandatory` | true | true/false | Player skips assembly (reduces strategy depth) | N/A | +| `AssemblyPhaseHasTimeLimit` | false | false or 30–120s | N/A | Time pressure reduces quality of decisions | + +## Visual/Audio Requirements + +### VFX Event Specifications + +| Event | Visual Effect | Audio Cue | Duration | +|-------|--------------|-----------|----------| +| **Node Completion** | Node pulses with type-color glow (1.0x → 1.1x → 1.0x), emits 8–12 geometric particles (triangles/diamonds) in radial burst | Soft ascending chime (C5-E5-G5), low volume | ~600ms | +| **Choice Made** | Selected edge animates from dashed to solid (300ms); unselected edge fades to 20% opacity | Subtle "lock-in" percussive click | ~400ms | +| **Loss Suffered** | Screen flashes red at 30% opacity for 150ms; failed node icon cracks/dims permanently; screen transitions to Run Failure screen | Low thud + dissonant minor-2nd tone | ~300ms | +| **Boss Defeated** | Full-screen golden particle shower (64+ hexagonal particles); Boss node explodes into geometric shards reforming as victory badge | 
Triumphant rising major chord fanfare (1.5s) | ~2000ms | +| **Run Victory** | All past nodes illuminate sequentially bottom-to-top (80ms each) forming a completed-path glow; Boss transforms into trophy/star icon; golden vignette | Extended C-E-G-C chord sustain + chime cascade | ~2500ms | +| **Run Failure** | Screen desaturates over 500ms; node track cracks along failed node; fade to dark with red tinge | Descending minor tone + deep bell | ~1500ms | + +### Color Palette + +| Node Type | Color | Hex | Icon | +|-----------|-------|-----|------| +| Combat | Crimson Red | `#FF4A4A` | Sword | +| Event | Royal Purple | `#9B59B6` | Question mark | +| Shop | Gold | `#FFD700` | Coin | +| Boss | Golden Crown | `#FF8C00` | Crown | +| Locked/Future | Slate Gray | `#4A5568` | — | +| Completed | Dimmed type color | 50% brightness | Checkmark | +| Failed | Desaturated + Red X | — | Broken node | + +### Node Visual States + +| State | Treatment | +|-------|-----------| +| **Current** | Full opacity, type color, white 3px border, pulse animation (1.0x–1.05x, 1.5s loop), outer glow ring | +| **Completed** | 40% opacity, grayscale tint, no glow, checkmark overlay | +| **Failed** | 30% opacity, desaturated, crack texture, red X overlay | +| **Future/Locked** | 25% opacity, no edges visible | +| **Future/Revealed** | 70% opacity, type color, dashed interactive edges | +| **Boss** | Full opacity, 1.3x scale, crown icon, amber/gold particle aura (#FF8C00) | + +### Track Layout +- Vertical orientation, Boss at top (fixed beacon glow), Node 1 at bottom +- Current node centered in viewport; past nodes slide up and compress (0.9x scale per node above) +- Edges: 2px type-colored lines, dashed when active, solid when locked +- All particle shapes: triangles, diamonds, hexagons only — no organic curves + +### Audio Style +- Sounds: clean sine/triangle waves — digital-mathematical, not organic +- SFX duration: 100–400ms typical +- Ambient: C major chord drone/pad during track exploration +- 
Boss entry: dedicated boss music stinger + +### Accessibility +- Color + iconography always paired (color alone never conveys type) +- Loss flash 150ms at 30% opacity — accompanied by a brief screen shake; **an accessibility toggle in Settings allows this flash to be replaced with a slow fade (500ms)** for players with photosensitive concerns +- Boss Defeated particle shower (64+ hexagonal particles) — **an accessibility toggle in Settings reduces particle count to 16** for players with visual sensitivity +- Audio cues have visual alternatives (state changes, screen flashes) +- High-contrast mode: node border brightens to 4px white +- **Colorblind differentiation**: Failed state uses a distinct shape treatment (jagged/broken frame outline) in addition to desaturated color + red X, so it is distinguishable from Completed without relying on color perception. Boss node aura uses golden-orange (#FF8C00) rather than crimson to differentiate from Combat node red (#FF4A4A); crown icon and particle aura provide additional differentiation. +- **Node state opacity minimum**: Future/Locked state uses minimum 25% opacity (not 15%) so it remains visible rather than appearing as blank space. +- **Inventory access**: Player can view (but not modify) inventory and Gold/Coin balance from the Node Map Screen at any time. Modification is restricted to Assembly Phase only. This enables informed node choice decisions without violating assembly-phase-only build modification. + +## UI Requirements + +### Node Map Screen + +**Layout**: Full-screen vertical scrollable node track. +- Boss node fixed at top with persistent beacon glow. +- Current node centered vertically. +- Past nodes stacked above (dimmed, compressed 0.9x per node). +- Future nodes hidden below fold. 
+ +**Node Card** (per node): +- Size: ~80×80px base, Boss 1.3x (104×104px) +- Content: type icon (sword/question/coin/crown), node index number +- Border: 3px white on current, type-colored on revealed, none on locked +- Background: type color fill + +**Edge Display**: +- Lines connecting current node to 2 revealed destinations +- Dashed while unchosen, solid after selection +- Type-colored + +**Run Progress HUD**: +- Top-left: Run index badge ("Run #3") +- Top-right: Gold counter and Coin counter (displayed separately with distinct icons; tooltip on hover explains each) + +### Node Choice Overlay + +**Trigger**: Appears when entering `NodeReveal` state after Assembly Phase. + +**Content**: +- Title: "Choose Your Path" (or equivalent) +- Two node cards displayed horizontally, each showing node type + type icon +- Cards highlight on hover; selection locks on click +- "Locked in" confirmation animation on selection + +**Constraints**: +- No third option visible +- Player cannot advance without choosing +- ESC/Cancel not supported — choice is mandatory + +### Assembly Phase Screen + +**Trigger**: Auto-enters after any node resolves. + +**Content**: +- Tower slots (current tower configs, 3 component slots each) +- Inventory grid (all owned components) +- Repository grid (all stored components) +- **Next Node Preview**: The 2 outgoing edge destinations from the completed node are displayed on-screen during Assembly Phase, showing each destination's node type and index. This allows the player to optimize their tower build based on known upcoming challenges — matching the "preparing and adapting" Player Fantasy. 
+- "Ready" button (bottom-right, large, pulsing until confirmed) + +**Interactions**: +- Drag components between inventory and tower slots +- Click "Ready" to confirm and advance +- No time limit in default configuration (tunable) + +**Empty State**: +- When inventory is empty: tower slots show placeholder silhouettes (dotted outline) with "Empty" label +- "Ready" button is still present and functional when inventory is empty — it pulses to indicate action is available +- Repository grid shows empty state with "No stored components" message + +### Run End Screen + +**Trigger**: Appears on `RunEnd` state. + +**Variants**: +- **Victory**: Golden theme, Boss defeated badge, Gold total, nodes cleared, "Return to Menu" button +- **Failure**: Desaturated/red theme, "Run Failed" message, furthest node reached, "Return to Menu" button + +**No retry button** — runs are single-attempt + +### Interaction Constraints +- No back button during node choice +- No undo after node selection is confirmed +- **Mandatory commitment confirmation**: A "This path cannot be undone." message must appear in the Node Choice Overlay before the player can confirm. This is not optional flavor text. +- ESC does not cancel Assembly Phase (must click "Ready") +- Player can **view** inventory and Gold/Coin balance from the Node Map Screen at any time; player can only **modify** inventory and tower builds during Assembly Phase +- **Next node types visible during Assembly Phase**: The 2 outgoing edge destinations are displayed on the Assembly Phase screen before the player clicks Ready, enabling informed build optimization + +## Acceptance Criteria + +### Run Structure +- **GIVEN** a new run, **WHEN** the player starts, **THEN** a 10-node track is generated with Node 10 as BossCombat, and node types follow the fixed sequence (Combat, Combat, Combat, Shop, Combat, Event, Combat, Shop, Combat, BossCombat) for the duration of the run. 
+- **GIVEN** the player is at NodeReveal, **WHEN** they see the outgoing edges, **THEN** exactly 2 destination nodes are displayed with their `RunNodeType` enum values visible. The full track (all 10 nodes) is visible from the run start. +- **GIVEN** the player has selected a node edge, **WHEN** they confirm, **THEN** the choice is locked, a modal dialog displays the text "This Path Cannot Be Undone" with a "Confirm" button, and previously visited nodes remain in Completed state and are not present in the NodeReveal choice set. +- **GIVEN** the player has completed Node N (N < 10), **WHEN** they are on the Node Choice Overlay or any subsequent screen, **THEN** no UI element, back button, or code path allows navigation back to Node N-1 or any previously completed node. +- **GIVEN** the player completes Node 10 (Boss), **WHEN** the run end resolves, **THEN** the run enters `RunEnd` state and does not return to the node track or generate additional nodes. + +### Node Resolution +- **GIVEN** the player completes Combat node n, **WHEN** they win, **THEN** they receive Gold equal to `DRLevel.RewardGold` for the linked level, plus component drops as defined by the level's component drop table. +- **GIVEN** the player completes Event node, **WHEN** the event resolves, **THEN** the event's outcome modifiers (Gold delta, HP delta, buffs/debuffs) are reflected in the player's run state immediately, the Event UI is dismissed, and the transition to Assembly Phase begins within 2 seconds. +- **GIVEN** the player completes Shop node, **WHEN** they exit the shop, **THEN** all purchases and sales are committed to the player's inventory and the UI transitions to Assembly Phase within 2 seconds. +- **GIVEN** the player opens a Shop node, **WHEN** they click "Leave" without making any purchases or sales, **THEN** no changes are made to the player's inventory or Gold, and the UI transitions to Assembly Phase. 
+- **GIVEN** the player completes Shop node 4, **WHEN** they later reach Shop node 8, **THEN** Shop node 8 functions normally with the same rules; there is no special mitigation or bonus for consecutive shop nodes. + +### Combat Loss +- **GIVEN** the player loses Combat at any node (including Node 10), **WHEN** the loss is recorded, **THEN** the run ends immediately in failure. The run's Gold, Coin, Inventory, and TowerConfig are not written to Progression; the in-memory run state is cleared; and the player is placed on the RunEnd (Failure) screen. +- **GIVEN** the player loses at the Boss (Node 10), **WHEN** Node 10 resolves, **THEN** the Boss node does not fire `NodeCompleteEventArgs` with `CombatWon = true`, and no `DRLevel.RewardGold` or `BossBonus` is added to run state. + +### Boss +- **GIVEN** the player defeats the Boss, **WHEN** Node 10 resolves, **THEN** they receive the Boss level's `DRLevel.RewardGold` plus `BossBonus = 200` gold. +- **GIVEN** the player faces the Boss, **WHEN** the Boss spawns, **THEN** `BossEffectiveHp = DRLevel.BaseHp × 2^(completedLoopCount) × (1 + 0.1 × nodesCompleted)`, where `completedLoopCount` is the number of completed boss cycles and `nodesCompleted` is the count of non-boss combat nodes cleared this run. + +### Assembly Phase +- **GIVEN** the player is in Assembly Phase, **WHEN** the screen is displayed, **THEN** the 2 outgoing edge destinations from the completed node are visible on-screen, each showing its node type and index. +- **GIVEN** the player is in Assembly Phase, **WHEN** they click the "Ready" button, **THEN** the Assembly Phase ends, the Node Choice Overlay appears, and the player selects from the 2 already-revealed destinations. +- **GIVEN** the player has 0 components in inventory, **WHEN** they enter Assembly Phase, **THEN** the "Ready" button is enabled and clicking it proceeds to the next node without modification. 
+- **GIVEN** a node resolves (Combat victory, Event completed, Shop exited), **WHEN** the resolution completes, **THEN** Assembly Phase is entered automatically. The player is never required to take an action to trigger Assembly Phase entry. +- **GIVEN** the player is in Assembly Phase, **WHEN** they have not yet clicked "Ready", **THEN** the player may re-enter and modify tower configurations freely. The "Ready" button is the only mandatory action to proceed. + +### Data Persistence +- **GIVEN** a completed run ending in victory, **WHEN** the run ends, **THEN** a subsequent read of `Progression.GetRunHistory()` returns an entry containing the Gold total, nodesCompleted count, and BossDefeated flag for this run, and the player is returned to main menu. +- **GIVEN** a new run starts, **WHEN** the player begins, **THEN** Gold, Coin, Inventory, and Tower configs are reset to `DRRunConfig.StartGold`, `DRRunConfig.StartCoin`, empty inventory, and default tower configs respectively. + +### Cross-System +- **GIVEN** the player completes a Combat node, **WHEN** they win, **THEN** component drops are added to the player's inventory before the Assembly Phase UI is shown. +- **GIVEN** the player completes a Combat node, **WHEN** `NodeCompleteEventArgs` with `CombatWon = true` is dispatched, **THEN** the transition to Assembly Phase begins within 2 seconds of the event. + +## Open Questions + +### 1. Should Event Nodes Appear in the First 3 Nodes? +**Status**: ✅ RESOLVED — Option [C] adopted: Events restricted to positions 4–8. Node sequence updated accordingly (Events only at node 6 in Plain theme). + +### 2. Does the Player See the Full Track at Run Start? +**Status**: ✅ RESOLVED — Option [A] adopted: Full track visible from run start. Player Fantasy updated to reflect this. All nodes visible, current node highlighted, past nodes dimmed. + +### 3. 
Blocked Systems Before Node System Implementation +**Status**: ✅ RESOLVED — All blocking GDDs have been completed: +- **Shop System GDD** (`design/gdd/shop.md`) — Status: Designed. `ShopContext` contract and buy/sell behavior are defined. +- **Event System GDD** (`design/gdd/event-system.md`) — Status: Designed. `EventContext` contract and risk/reward resolution flow are defined. +- **Progression GDD** (`design/gdd/progression.md`) — Status: Designed. `RecordRunEnd()` and `GetLifetimeStats()` interfaces are defined. diff --git a/design/gdd/progression.md b/design/gdd/progression.md new file mode 100644 index 0000000..346a19c --- /dev/null +++ b/design/gdd/progression.md @@ -0,0 +1,421 @@ +# Progression (成长系统) + +> **Status**: Designed +> **Author**: SepComet + agents +> **Last Updated**: 2026-04-29 +> **Implements Pillar**: [To be defined in game-pillars.md — Progression serves the "collection and mastery" fantasy; pillar text not yet written] + +## Overview + +Progression is the **permanent state manager** that persists across runs. It owns the read/write of unlock state (themes, difficulty tiers, component pools) and the lifetime statistics record. It does not hold run-level state (Gold, Inventory, Tower configs — those live and die within a run). On every `RunEnd`, the Node System sends final run stats to Progression; Progression evaluates whether any unlock thresholds are crossed; if so, the player's unlock pool expands. The player sees this as "my account is more powerful" — new options available at the start of every subsequent run. + +## Player Fantasy + +**"Complete your collection. Fill every gap. Nothing left on the table."** + +The Progression fantasy is the feeling of **relentless collection** — every run earns something toward a permanent expansion of what's possible. A component type unlocked, a difficulty tier cracked, a theme revealed. The next run the player opens, they notice the new option immediately and feel the game acknowledge their effort. 
The goal is to empty the unlock list — to have seen everything the game offers. This is the roguelike's fundamental pull: *the set of available tools today is larger than it was ten runs ago*. + +The player should feel: +- **Driven by gaps** — the unlock list shows what's missing; completing a set feels like closing a circuit +- **Rewarded for exploration** — trying a new difficulty or theme unlocks more content as a side effect +- **Invested in permanence** — nothing earned is ever lost; the account is a record of everything accomplished + +## Detailed Design + +### Core Rules + +**SR1. Data Persistence**: `ProgressionData` is a persistent save-file object. It is loaded at game start and saved after every `RecordRunEnd()` call. It holds: `UnlockedDifficulties`, `UnlockedThemes`, `UnlockedComponentPools`, `UnlockedStartingLoadouts`, `CompletionCounts`, and `LifetimeStats`. + +**SR2. Unlock Evaluation Trigger**: On every `RunEnd` with `bossDefeated = true`, the Node System calls `Progression.RecordRunEnd(runStats)`. Progression evaluates all unlock conditions against the current `ProgressionData`. Unlocked items are added to the appropriate pool immediately and an `UnlockResult[]` is returned. Loss runs record stats only; no unlock evaluation. + +**SR3. Difficulty Unlock Chain**: `Normal` is always unlocked. `Hard` unlocks when player defeats Boss on Normal. `Expert` unlocks when player defeats Boss on Hard. `Nightmare` unlocks when player defeats Boss on Expert. + +**SR4. Theme Unlock**: Themes are parallel — any theme whose unlock condition is met becomes available. Player selects theme at run start from the New Game screen. Unlock condition per theme stored in `DRTheme.UnlockCondition`. + +**SR5. Component Pool Unlock**: Component pools (rarity tiers) unlock based on difficulty and win-count conditions. Pools are additive — when a pool unlocks, its components become available in shop and drop tables. 
`DRComponentPool.UnlockCondition` defines the gating condition per pool. + +**SR6. Starting Loadout Selection**: At run start, player chooses one loadout from all `UnlockedStartingLoadouts`. Starting bonuses are applied immediately when the run begins: gold added to `PlayerInventoryComponent.Gold`, pre-built towers assembled and placed in roster. + +**SR7. Lifetime Stats**: `LifetimeStats` is updated on every run end (win or loss): `TotalRunsStarted++`, `TotalGoldEarned += gold`, `FurthestNodeReached = max(previous, nodesCompleted)`, etc. Win stats updated only on `bossDefeated = true`. + +**SR8. Unlock Feedback**: On successful unlock evaluation, a `UnlockedEventArgs` is fired. UI listens and shows an animated toast popup listing the newly unlocked item(s). The toast appears on the RunEnd victory screen before returning to menu. + +### States and Transitions + +Progression is **purely passive** — it has no runtime state machine. It exposes interfaces that other systems call. + +| State | Description | +|-------|-------------| +| `Loaded` | Save file loaded into memory. Progression data is current. | +| `Evaluating` | `RecordRunEnd()` is executing; unlock conditions are being checked. | +| `Dirty` | New unlocks found; waiting for save. | +| `Saved` | Dirty state persisted to disk. | + +*No user-facing state machine — UI screens that display Progression data (Profile, New Game) are owned by the UI layer, not by Progression.* + +### Interactions with Other Systems + +| System | Direction | Interface | +|--------|-----------|------------| +| **Node System** | Receives from | `RecordRunEnd(runStats)` — called on every run end (win or loss). `runStats` contains `{goldEarned, nodesCompleted, bossDefeated, coinsEarned, componentsDropped}`. | +| **Shop System** | Reads from | `GetUnlockedComponentPools()` — shop uses this to determine which component rarities appear in `BuildShopGoods()`. 
| +| **UI / New Game Screen** | Reads from | `GetUnlockedDifficulties()`, `GetUnlockedThemes()`, `GetUnlockedStartingLoadouts()` — populate run setup UI. | +| **UI / Profile Screen** | Reads from | `GetLifetimeStats()` — displays career statistics. | +| **UI / Run End Screen** | Receives from | `OnUnlockedEventArgs` — triggers toast popup for new unlocks. | +| **Event System** | Soft read | Event outcomes do not directly affect Progression. Events may reference `GetLifetimeStats()` for conditional text. | + +## Formulas + +### 1. UnlockEvaluation — Run-End Unlock Check + +The `UnlockEvaluation(runStats)` function is called by `Progression.RecordRunEnd()` on every winning run. It checks all locked unlockables and returns a list of `UnlockResult` objects for newly unlocked items. + +**Function signature:** +``` +UnlockResult[] UnlockEvaluation(RunStats runStats) +``` + +**Variables:** + +| Variable | Symbol | Type | Range | Description | +|----------|--------|------|-------|-------------| +| bossDefeated | b | bool | {true, false} | Whether Boss was defeated this run | +| difficulty | d | DifficultyType | Normal..Nightmare | Difficulty at run start | +| totalWins | w | int | ≥ 0 | Cumulative wins across all runs | +| totalEnemiesDefeated | e | int | ≥ 0 | Cumulative enemies killed (lifetime) | +| nodesCompleted | n | int | 0–10 | Nodes cleared this run | + +**Output Range:** 0 to N newly unlocked items per run. In practice, typically 0–2. + +--- + +**A. Difficulty Unlock (chain)** + +``` +nextDifficulty(d) = { + Normal → Hard, + Hard → Expert, + Expert → Nightmare, + Nightmare → null (no next tier) +} + +IF b == true AND nextDifficulty(d) != null THEN + unlock(nextDifficulty(d)) +``` + +**Example:** Player defeats Boss on Normal → `nextDifficulty(Normal) = Hard` → Hard is unlocked. + +--- + +**B. 
Theme Unlock (per-theme condition from DRTheme)** + +``` +FOR each locked theme T: + IF T.condition.type == WinOnDifficulty + AND b == true AND d >= T.condition.targetDifficulty + OR T.condition.type == TotalWins + AND w >= T.condition.targetWins + OR T.condition.type == Mixed + AND b == true AND d >= T.condition.minDifficulty + AND w >= T.condition.minWins + THEN unlock(T) +``` + +**Example:** Frost theme has condition `WinOnDifficulty(Normal)`. Player wins on Normal → Frost unlocked. + +--- + +**C. Component Pool Unlock (rarity tiers)** + +``` +poolUnlocked(r, d, w) = ( + (r == White) OR + (r == Green AND d >= Normal) OR + (r == Blue AND d >= Hard AND w >= 2) OR + (r == Purple AND d >= Expert AND w >= 5) OR + (r == Red AND d >= Nightmare AND w >= 10) +) + +FOR each locked component pool P: + IF poolUnlocked(P.rarity, d, w) == true THEN unlock(P) +``` + +**Example:** Player wins on Hard for the first time (d=Hard; w is now 2, because unlocking Hard already required one Normal win per SR3). Blue pool: `d >= Hard (true), w >= 2 (true)` → Blue pool unlocks. + +--- + +**D. Starting Loadout Unlock (milestone conditions)** + +``` +FOR each locked loadout L: + IF L.condition.type == TotalWins AND w >= L.condition.targetWins + OR L.condition.type == EnemiesDefeated AND e >= L.condition.targetEnemies + OR L.condition.type == NodesCompleted AND n >= L.condition.targetNodes + OR L.condition.type == Combined AND w >= L.condition.wins + AND e >= L.condition.enemies + THEN unlock(L) +``` + +**Example:** "Starter Pack" loadout has condition `TotalWins(3)`. After 3rd win → unlocked. + +--- + +### 2. LifetimeStats.Update + +Called on every `RecordRunEnd()` for both win and loss runs. 
+ +``` +LifetimeStatsUpdate(runStats): + // All runs + totalRunsStarted += 1 + totalGoldEarned += runStats.goldEarned + furthestNodeReached = max(furthestNodeReached, runStats.nodesCompleted) + totalNodesCompleted += runStats.nodesCompleted + totalEnemiesDefeated += runStats.enemiesDefeatedThisRun + totalComponentsCollected += runStats.componentsCollectedThisRun + + // Win runs only (bossDefeated == true) + IF runStats.bossDefeated == true: + totalWins += 1 + winsByDifficulty[runStats.difficulty] += 1 + winsByTheme[runStats.theme] += 1 + bossesDefeated += 1 +``` + +**Output:** `LifetimeStats` updated in-place. No return value. + +--- + +### 3. StartingBonusResolve + +Called at run start when player selects a starting loadout. + +``` +StartingBonus StartingBonusResolve(loadoutId): + LOADOUT = DRStartingLoadout[loadoutId] + IF LOADOUT == null: + return StartingBonus { goldAmount=0, prebuiltTower=null } + + goldAmount = LOADOUT.goldBonus + + IF LOADOUT.hasPrebuiltTower == true: + prebuiltTower = AssembleTower(LOADOUT.prebuiltTowerComponents) + ELSE: + prebuiltTower = null + + return StartingBonus { goldAmount, prebuiltTower } +``` + +**Edge case:** If `loadoutId` not found → return `{goldAmount=0, prebuiltTower=null}`. If tower components unavailable → return `{goldAmount=LOADOUT.goldBonus, prebuiltTower=null}`. + +--- + +### 4. Starting Gold Cap Check + +When `StartingBonusResolve` returns a gold amount, it is added to the run's starting gold, which is then subject to `MaxPlayerGold` (9999) per the Shop GDD. + +``` +effectiveStartingGold = Min(defaultStartGold + goldAmount, MaxPlayerGold) +``` + +`defaultStartGold` comes from `DRRunConfig.StartGold`. + +## Edge Cases + +- **If player loses any run**: No unlock evaluation. LifetimeStats still updated (run count, gold, furthest node). Losses contribute to stats but not unlocks. +- **If all unlock conditions already satisfied**: `UnlockEvaluation` returns empty array. No-op. Player keeps their unlocks. 
+- **If save file is corrupted or missing on load**: Initialize fresh `ProgressionData` with defaults (Normal difficulty only, Plain theme only, no bonus loadouts). Log error. +- **If `RecordRunEnd()` is called twice for the same run**: Deduplicated by run ID. Only first call processes. +- **If `totalEnemiesDefeated` or `totalComponentsCollected` would overflow**: Use `long` (Int64) for these cumulative fields. `int` for other counters. +- **If `runStats.goldEarned < 0`**: Treat as 0, log error. Gold should never be negative. +- **If pre-built tower components are not yet unlocked**: Return `{goldAmount=LOADOUT.goldBonus, prebuiltTower=null}`. Log warning. Player can still start run. +- **If `defaultStartGold + goldBonus > MaxPlayerGold` (9999)**: Cap at 9999. Excess discarded. +- **If Alt+F4 mid-run (no RunEnd dispatched)**: No stats recorded for that partial run. Next run starts clean. +- **If two unlocks trigger in same run**: Both appear in the `UnlockResult[]`. `UnlockedEventArgs` fired once with all new unlocks. Player sees both in toast. +- **If difficulty enum is invalid in runStats**: Skip difficulty unlock evaluation. Log error. Other unlocks (themes, pools) still processed. +- **If player wins on Hard but has not unlocked Hard (impossible by SR3)**: `UnlockEvaluation` still processes — the difficulty field in runStats reflects what was played. Player having unlocked Hard is pre-checked at run start, not re-checked at evaluation. +- **If pre-built tower loadout selected but player has no inventory space**: Pre-built tower is placed directly into combat roster (slot 1), not inventory. No inventory space required. + +## Dependencies + +### Upstream Dependencies (what Progression depends on) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Node System** | Hard | `RecordRunEnd(runStats)` is called by the Procedure layer after every run end. 
`runStats` contains `{difficulty, theme, goldEarned, nodesCompleted, bossDefeated, coinsEarned, componentsDropped}`. No run-level state is stored in Progression between calls. | GDD exists (`design/gdd/node-system.md`) — In Review | +| **Shop System** | Soft | Shop reads `GetUnlockedComponentPools()` (exposed by Progression) to determine which component rarities appear in shop. If shop needs to filter by rarity, it calls this method. | GDD exists (`design/gdd/shop.md`) — In Design | + +### Downstream Dependents (what depends on Progression) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Node System** | Hard | Node System's `RunEnd` state cannot persist without Progression. All acceptance criteria involving `Progression.RecordRunEnd()` and `Progression.GetLifetimeStats()` are blocked until this GDD is completed. | GDD exists — In Review; blocked by this GDD | +| **Shop System** | Soft | Shop may read `GetLifetimeStats()` for conditional text or future features (e.g., "you've spent X gold across all runs"). Not currently required. | GDD exists | +| **UI / New Game Screen** | Hard | Populates difficulty, theme, and starting loadout selection from `GetUnlockedDifficulties()`, `GetUnlockedThemes()`, `GetUnlockedStartingLoadouts()`. | Pending implementation | +| **UI / Profile Screen** | Hard | Displays lifetime statistics from `GetLifetimeStats()`. | Pending implementation | +| **UI / Run End Screen** | Hard | Listens for `OnUnlockedEventArgs`. Shows toast popup listing new unlocks on victory. | Pending implementation | +| **Event System** | Soft | Events may read `GetLifetimeStats()` for conditional flavor text (e.g., "You've won X times"). Not required for MVP. 
| GDD exists | + +### Bidirectional Consistency Check + +- [x] Node System → upstream (writes to Progression via `RecordRunEnd`) ✅ +- [x] Progression → downstream to Node System (Node System blocked until Progression exists) ✅ +- [x] Shop System → reads component pool unlock state ✅ +- [ ] Event System → soft dependency, no hard coupling ✅ + +## Tuning Knobs + +All unlock conditions are data-table-driven. No code changes are required to add or modify unlock conditions. + +### Data-Driven Tuning Tables + +| Table | Controls | Designer Knobs | +|-------|----------|---------------| +| `DRDifficultyTier` | Difficulty unlock chain | `nextTierId`, `unlockConditionType`, `unlockThreshold` | +| `DRTheme` | Theme unlock conditions | `unlockConditionType`, `targetDifficulty`, `targetWins`, `minWins`, `minDifficulty` | +| `DRComponentPool` | Component pool rarity gates | `rarity`, `minDifficulty`, `minWins` | +| `DRStartingLoadout` | Starting loadout conditions and bonuses | `conditionType`, `targetWins`, `targetEnemies`, `targetNodes`, `goldBonus`, `hasPrebuiltTower`, `prebuiltTowerComponents` | + +### Runtime Tuning Knobs + +| Knob | Default | Safe Range | Extreme: Too Low | Extreme: Too High | +|------|---------|-----------|-----------------|------------------| +| `MaxPlayerGold` | 9999 | 5000–99999 | Starting bonuses feel large; shop loses tension | Gold feels pointless; player never feels rich | +| `DefaultStartGold` (`DRRunConfig.StartGold`) | varies | 0–5000 | Player starts too poor; first shop feels bad | Player starts too rich; first shop trivial | +| `DRStartingLoadout.goldBonus` | varies | 0–5000 | Bonus too low; no meaningful start change | Bonus too high; shop becomes irrelevant | +| `DRComponentPool.minWins` | varies | 0–50 | Higher pools accessible too early; power spike | Higher pools locked too long; mid-game feels boring | +| `DRTheme UnlockDifficulty` | varies | varies | Easier themes → faster content exhaustion | Harder themes → grindy; "one more run" 
becomes frustrating | + +### Knob Interactions + +- **Starting gold bonus + MaxPlayerGold**: If `defaultStartGold + goldBonus > MaxPlayerGold`, excess is silently discarded. Ensure bonuses respect the cap. +- **Difficulty unlock + Component pool**: When a new difficulty tier unlocks, the component pool for that tier becomes available. Ensure pool content (components) exists before the difficulty is unlockable. +- **Theme unlock conditions**: Some themes reference `minWins`. Ensure the intended playtime before unlocking matches the expected pacing curve. + +## Visual/Audio Requirements + +Progression is a data-layer system with no inherent visual or audio identity. The player's interaction with Progression is mediated entirely through UI screens (New Game, Profile, Toast). No dedicated VFX or audio events are owned by the Progression system itself. + +However, the **Toast Popup** (Section UI Requirements) does have visual requirements: the unlock notification should use the game's geometric shape vocabulary (diamonds, triangles, hexagons) consistent with the shop rarity shimmer and node completion VFX. + +**Visual Style for Toast Popup:** +- Shape: diamond outline frame around unlock icon +- Rarity color coding: theme unlocks use theme's accent color; difficulty unlocks use difficulty's color; component pool unlocks use the newly available rarity's color +- Entry animation: scale 0.8x → 1.0x, 250ms ease-out +- Particle burst on unlock: geometric particles matching the unlock category + +**Audio for Toast Popup:** +- Ascending 3-note arpeggio (C4-E4-G4) at volume 0.5, 200ms +- Distinct from shop purchase arpeggio (which is rarity-keyed and longer) + +## UI Requirements + +### New Game Screen (Run Setup) + +**Trigger**: Player selects "New Run" from main menu. 
+ +**Layout**: +- Title: "New Run" +- Difficulty row: horizontal list of unlocked difficulties; locked ones shown as silhouettes with lock icon and tooltip showing unlock condition +- Theme row: horizontal grid of theme cards; locked themes hidden or shown as silhouettes +- Starting Loadout row: horizontal list of owned loadouts; radio-button selection (one active at a time) +- Start Run button: bottom-center, large, disabled until at least one valid combination is selected + +**Unlock Feedback on Screen:** +- When a new unlock becomes available between when the player last viewed New Game and pressed Start, the newly available item pulses briefly (once) to draw attention + +**Constraint**: Player cannot select a locked difficulty or theme. UI enforces this; no runtime validation needed. + +--- + +### Profile Screen (Lifetime Statistics) + +**Trigger**: Player selects "Profile" or "Stats" from main menu. + +**Layout**: +- Header: total runs, total wins, win rate (percentage) +- Primary stats grid: Total Gold Earned, Furthest Node Reached (1–10), Bosses Defeated +- Secondary stats (tabbed or collapsible): Components Collected, Wins by Difficulty (table), Wins by Theme (table) +- No editing — display only + +**Constraint**: All stats are read from `GetLifetimeStats()`. No modification is possible from this screen. + +--- + +### Toast Popup (Unlock Notification) + +**Trigger**: `OnUnlockedEventArgs` fires on `RecordRunEnd` with non-empty `UnlockResult[]`. 
+ +**Layout**: +- Position: bottom-center of screen, above Run End screen content +- Content: unlock category icon (geometric shape), unlocked item name, "UNLOCKED" label +- Shape: diamond outline frame +- Entry: scale 0.8x → 1.0x, 250ms ease-out +- Exit: fade out over 200ms on click or after 5 seconds + +**Constraints**: +- Multiple unlocks in same run: show one toast per item, staggered 300ms apart +- If player clicks away immediately, toast dismisses immediately — do not block + +--- + +### Accessibility + +- All unlock conditions shown as text in tooltip (no icon-only indicators) +- Toast is keyboard-accessible (Enter/Space to dismiss) +- Profile screen stats are screen-reader friendly with labeled values + +## Acceptance Criteria + +### Data Persistence +- **GIVEN** a new player starts the game for the first time, **WHEN** they begin a run, **THEN** only Normal difficulty, Plain theme, and the default starting loadout are available. +- **GIVEN** the player's save file is corrupted or missing, **WHEN** the game loads, **THEN** a fresh `ProgressionData` is initialized with defaults (Normal only, Plain only, no bonus loadouts) and no crash occurs. + +### Unlock Evaluation +- **GIVEN** a player completes a run with `bossDefeated=false` (loss), **WHEN** `RecordRunEnd` is called, **THEN** `totalRunsStarted` increments, `totalGoldEarned` increases by `goldEarned`, `furthestNodeReached` and `totalNodesCompleted` update correctly, but no unlock evaluation occurs. +- **GIVEN** a player defeats the Boss on Normal difficulty for the first time, **WHEN** `RecordRunEnd` is called with `bossDefeated=true` and `difficulty=Normal`, **THEN** Hard difficulty is unlocked and appears in the New Game screen. +- **GIVEN** a player defeats the Boss on Hard difficulty, **WHEN** `RecordRunEnd` is called, **THEN** Expert difficulty is unlocked. +- **GIVEN** a player defeats the Boss on Expert difficulty, **WHEN** `RecordRunEnd` is called, **THEN** Nightmare difficulty is unlocked. 
 + +### Theme Unlock +- **GIVEN** a theme has `UnlockCondition = WinOnDifficulty(Normal)` and the player defeats the Boss on Normal, **WHEN** `RecordRunEnd` completes, **THEN** that theme appears in the theme selection on the New Game screen. +- **GIVEN** a theme has `UnlockCondition = TotalWins(5)` and the player earns their 5th total win, **WHEN** `RecordRunEnd` completes, **THEN** that theme becomes available in the New Game screen. + +### Component Pool Unlock +- **GIVEN** a player wins on Normal difficulty for the first time, **WHEN** `RecordRunEnd` completes, **THEN** the Green component pool is unlocked and Green rarity components appear in the shop's offered goods. +- **GIVEN** a player has 2+ wins and wins on Hard difficulty, **WHEN** `RecordRunEnd` completes, **THEN** the Blue component pool is unlocked. + +### Starting Loadout +- **GIVEN** a player has 2 total wins, **WHEN** they win a run (earning their 3rd total win), **THEN** any Starting Loadout with condition `TotalWins(3)` becomes unlocked and visible in the loadout selection. +- **GIVEN** a player selects a Starting Loadout with `goldBonus=500`, **WHEN** the run begins, **THEN** `PlayerInventoryComponent.Gold` equals `defaultStartGold + 500`, capped at `MaxPlayerGold` (9999). +- **GIVEN** a player selects a Starting Loadout with a pre-built tower, **WHEN** the run begins, **THEN** slot 1 of the combat roster contains the pre-built tower assembled from the loadout's components. +- **GIVEN** a player's pre-built tower loadout references components not yet unlocked, **WHEN** `StartingBonusResolve` is called, **THEN** the gold bonus is applied, `prebuiltTower` is `null`, and a warning is logged. + +### Lifetime Stats (All Runs) +- **GIVEN** a player completes 3 runs with `nodesCompleted` of 4, 7, and 5 respectively, **WHEN** `LifetimeStats` is queried, **THEN** `totalNodesCompleted` equals 16. 
+- **GIVEN** a player Alt+F4s mid-run without triggering `RunEnd`, **WHEN** the game restarts, **THEN** no stats from that partial run appear in `LifetimeStats`. + +### Lifetime Stats (Win Runs Only) +- **GIVEN** a player wins a run, **WHEN** `RecordRunEnd` is called with `bossDefeated=true`, **THEN** `totalWins` increments, `winsByDifficulty[difficulty]` increments, `winsByTheme[theme]` increments, and `bossesDefeated` increments. +- **GIVEN** `totalEnemiesDefeated` would exceed `int.MaxValue` with a large cumulative value, **WHEN** `LifetimeStats.Update` is called, **THEN** the field uses `long` (Int64) and does not overflow. + +### Unlock Feedback +- **GIVEN** a player wins a run that triggers two new unlocks simultaneously, **WHEN** `RecordRunEnd` completes, **THEN** the Run End screen shows a toast popup listing all newly unlocked items. + +### Edge Cases +- **GIVEN** `RecordRunEnd` has already been called for run ID "abc123", **WHEN** it is called again with the same run ID, **THEN** `LifetimeStats` only increments once and only one unlock evaluation occurs. +- **GIVEN** `runStats.goldEarned` is negative (e.g., −100) due to an upstream bug, **WHEN** `LifetimeStats.Update` is called, **THEN** `totalGoldEarned` increases by 0, not −100, and the error is logged. + +## Open Questions + +### 1. Standalone Binary Achievements +**Status**: OPEN — Should there be standalone binary achievements separate from the 4 unlock categories (e.g., "Defeat 100 Bosses", "Collect 1000 Components", "Win on all difficulties")? These would be display-only badges with no gameplay effect. Currently all unlocks are gated by the 4 categories. Adding achievements as a 5th category would increase content without adding mechanical variety. + +### 2. Cloud Save Sync +**Status**: OPEN — Any cloud sync considerations for `ProgressionData` across devices? Single-player desktop game — likely local-only. 
If multiplayer or cross-device play is ever added, ProgressionData would need serialization parity and conflict resolution. + +### 3. Starting Loadout Components Catalog +**Status**: OPEN — When a pre-built tower loadout references `prebuiltTowerComponents`, which component pool do those components draw from? If the referenced components are from a pool the player hasn't unlocked yet, `StartingBonusResolve` returns null tower (per edge case). Should pre-built loadouts source from a special "Starter" component pool that's always available, rather than the normal pool? + +### 4. "First Time" Callback to Audio/VFX +**Status**: OPEN — The GDD specifies a Toast popup for unlock feedback. Should there also be a dedicated "first time ever" callback signal that the audio system can hook for a distinct sound on the very first unlock of any category? Or is the Toast audio sufficient? diff --git a/design/gdd/reviews/node-system-review-log.md b/design/gdd/reviews/node-system-review-log.md new file mode 100644 index 0000000..6fe223a --- /dev/null +++ b/design/gdd/reviews/node-system-review-log.md @@ -0,0 +1,47 @@ +# node-system.md — Review Log + +## Review — 2026-04-29 — Verdict: MAJOR REVISION NEEDED (first pass) +**Scope signal**: XL +**Specialists**: game-designer, systems-designer, qa-lead, ux-designer, creative-director +**Blocking items**: 8 | **Recommended**: 9 +**Summary**: First review found critical spec/implementation contradiction (node types are fixed in code, not randomized as spec stated), arithmetic impossibility in TotalGold tables (3 inconsistent values: 830/750/670), missing Shop/Event system definitions, and LossPenalty system completely unimplemented. Creative director synthesis concluded this constitutes "fundamental design integrity failure" requiring major revision. 
+ +**Prior verdict resolved**: N/A (first review) + +--- + +## Review — 2026-04-29 — Verdict: NEEDS REVISION (post-revision) +**Scope signal**: XL +**Specialists**: game-designer, systems-designer, qa-lead, ux-designer, creative-director +**Blocking items**: 0 (resolved) | **Recommended**: 9 (partially addressed) +**Summary**: All 8 blocking items resolved in-session. Key changes: spec updated to fixed node sequence (matching implementation), TotalGold rebuilt using DRLevel.RewardGold, Position 9 probability gap fixed, LossPenalty marked [NOT YET IMPLEMENTED], all 7 broken ACs corrected, BaseHp=0 design resolved (any loss = run end), architecture references fixed. Remaining recommended items: Shop/Event systems need separate GDDs, accessibility improvements pending. + +**Prior verdict resolved**: Yes — original MAJOR REVISION NEEDED addressed; design now internally consistent and implementable pending Shop/Event GDDs. + +--- + +## Review — 2026-04-29 — Verdict: MAJOR REVISION NEEDED (second review — pre-revision) +**Scope signal**: L +**Specialists**: game-designer, systems-designer, qa-lead, creative-director +**Blocking items**: 6 | **Recommended**: 8 +**Summary**: Second review found economy mathematically broken (Boss=70% total gold, shop decorative), fantasy contradiction (enemy composition never revealed to player), BaseHp structurally meaningless (any loss=instant run end), AC5/7/9/10/13 not independently testable, Shop/Event code exists but no GDD, stale NodeComponent reference in diagram. All 6 blocking items resolved in-session. +**Prior verdict resolved**: Yes — first NEEDS REVISION addressed; new issues were economy balance, fantasy consistency, and testability. 
+ +--- + +## Review — 2026-04-29 — Verdict: NEEDS REVISION (third review — post revision) +**Scope signal**: L +**Specialists**: game-designer, systems-designer, qa-lead, ux-designer, creative-director +**Blocking items**: 3 | **Recommended**: 6 +**Summary**: Third review found 3 blocking issues: (1) TotalGold range stated as 780 but table sums to 1100 — fixed to 1100; (2) BossEffectiveHp GDD formula (linear × LoopScaling, 5× cap) didn't match code (exponential × 2^n, no cap) — GDD reconciled to match code; (3) Assembly Phase forced blind commitment before seeing next node types, contradicting stated Player Fantasy — fixed by showing Next Node Preview on Assembly screen before Ready. All 3 blocking items resolved in-session. Remaining recommended items: 2-choice differentiation unspecified, Boss scaling disconnected from run performance, Event node design unspecified, accessibility gaps, "within 2 seconds" unenforceable, single-loss zero-partial-rewards design. +**Prior verdict resolved**: Yes — second MAJOR REVISION NEEDED addressed; new issues were economy arithmetic, spec/code mismatch, and Assembly Phase UX flow. 
+ +--- + +## Review — 2026-04-30 — Verdict: MAJOR REVISION NEEDED (fourth review) +**Scope signal**: XL +**Specialists**: game-designer, systems-designer, economy-designer, qa-lead, ux-designer, creative-director +**Blocking items**: 8 | **Recommended**: 9 +**Summary**: Fourth review found 8 blocking issues: (1) Both edges lead to identical node types — cosmetic choice, not tactical (all 5 specialists converged); (2) Early economy starvation — 300g first shop arrival, Red costs 200-220g, shop non-functional; (3) Boss difficulty completely uncorrelated with run performance (nodesCompleted has zero effect); (4) Core Rules vs UI Requirements contradiction on Assembly Phase entry; (5) TotalGold n=1-9 ambiguous (count vs indices), Boss loop count domain 0-∞ but clamped; (6) Coin currency has no documented sink; (7) AC coverage gaps for Core Rules 3, 5, 9; (8) Boss VFX color crimson (Combat color) contradicts Color Palette gold/amber. All 8 resolved in-session. Key changes: edge divergence clarified as level-variant model; first shop tiered to White/Green only; Boss formula extended with (1 + 0.1 × nodesCompleted) multiplier; Assembly Phase set to auto-enter; Boss VFX color reconciled to amber/gold; Coin sink clarified (CombatNode intra-combat tower building); 5 new ACs added. Re-review in fresh session recommended. +**Prior verdict resolved**: Yes — third NEEDS REVISION addressed; new critical issues were false-choice architecture, economy starvation, Boss uncorrelation, and spec contradictions. + diff --git a/design/gdd/shop.md b/design/gdd/shop.md new file mode 100644 index 0000000..f888630 --- /dev/null +++ b/design/gdd/shop.md @@ -0,0 +1,509 @@ +# Shop System + +> **Status**: In Design +> **Author**: SepComet +> **Last Updated**: 2026-04-29 +> **Implements Pillar**: [To be designed] + +## Overview + +The Shop system is a **run-time economy service** that surfaces component goods to the player at designated Shop nodes during a run. 
It reads available component templates from `InventoryGenerationComponent.BuildShopGoods()`, resolves per-item pricing from `DRShopPrice`, and processes purchase transactions against the player's current gold via `PlayerInventoryComponent`. Purchased components are instantiated with stable `InstanceId`s, applied Tags, and Endurance, then added to the player's inventory. The Shop is not a passive display — it is a **tactical decision point** where the player evaluates their gold reserves and build gaps against the current component offering, deciding whether to invest or conserve for future nodes. Shop nodes appear in the node graph as a distinct node type; their placement frequency and pricing are the primary levers for run-level economy balance. + +## Player Fantasy + +**"Your gold is your ammunition. Spend it like you mean it."** + +The Shop fantasy is the feeling of **tactical urgency and deliberate investment**. The player has earned gold from the last combat — the question isn't "can I afford it?" but "do I need it right now, or will I need it more later?" Every component purchase is a bet on the future: this Muzzle closes a gap in my build, this Bearing makes my existing towers better, this Base hedges against an unknown threat two nodes from now. The shop should feel like a **weaponised pause** — a moment of calm strategy between fights, where the stakes are real because gold is finite and so are the shop's offerings. + +The player should feel: +- **Assessing scarcity** — the shop doesn't have everything; what it has is what you get this run +- **Valuing anticipation** — saving gold for a future shop node, or spending it now on a component that "completes" a tower, both feel like valid strategies +- **Experiencing consequence** — a spent gold coin is gone; the tower it enabled (or didn't) is the consequence + +## Detailed Design + +### Core Rules + +**SR1. 
Shop Node Entry**: When the player navigates to a Shop node in the node graph, `InventoryGenerationComponent.BuildShopGoods(goodsCount=6, runSeed, sequenceIndex)` generates a fixed pool of offered goods. The pool is deterministic for a given `runSeed + sequenceIndex` — revisiting the same shop node with the same seed produces the same goods. The pool is fixed for this visit once generated. + +**SR2. Shop Offerings Display**: Each `GoodsItemRawData` is displayed as a component card showing: type (Muzzle/Bearing/Base), name, rarity, Tags, description, and buy price. `IsPurchased = true` items are removed from display for this visit. + +**SR3. Purchase Transaction** (`PlayerInventoryTradeService.TryPurchaseComponent`): Player selects a component and pays the pre-rolled `GoodsItemRawData.Price`. `TryConsumeGold(price)` deducts gold; on success, `InventoryCloneUtility.CloneXxxComp()` clones the component with a new stable `InstanceId` and adds it to inventory. `IsPurchased` is set to `true`. + +**SR4. Gold Cap**: Player gold is capped at `MaxPlayerGold` (hard cap). `TryConsumeGold` fails if insufficient; `AddGold` caps at `MaxPlayerGold`. Excess earned gold is lost. + +**SR5. Shop Visit Exit**: Player may exit at any time via "Leave". No purchase is mandatory. + +**SR6. Selling (via RepoForm)**: `PlayerInventoryTradeService.TrySellItems(itemIds)` processes sales. Sale price = midpoint of `MinPrice..MaxPrice` for the component's rarity. Assembled components must be disassembled first. Towers in the combat roster may not be sold. + +**SR7. Determinism**: Shop goods are generated via `InventoryGenerationRandomContext(runSeed, sequenceIndex, Shop, goodsIndex)`. Same inputs always produce the same goods and prices — run reproducibility is guaranteed. + +### States and Transitions + +Shop has no persistent state machine of its own — it is entered and exited via the Node System. State is held in the `GoodsItemRawData.IsPurchased` flags and `PlayerInventoryComponent.Gold`. 
+ +**Shop Visit States**: + +| State | Description | Exits | +|-------|-------------|-------| +| `Browsing` | Player is viewing the shop; no purchase attempted yet | → `PurchaseConfirmed` on successful buy; → `Left` on "Leave" | +| `PurchaseConfirmed` | A purchase just succeeded; shop display updates (item marked purchased) | → `Browsing` — player can continue shopping | +| `Left` | Player exited via "Leave" or all 6 items purchased | → (shop phase ends; Node System advances) | + +**Player Gold States**: + +| State | Condition | Display | +|-------|-----------|---------| +| `CanAfford` | `Gold >= min(shopPrices)` | Normal display | +| `CannotAffordAny` | `Gold < min(shopPrices)` | Greyed-out buy buttons | +| `AtCap` | `Gold == MaxPlayerGold` | Gold display shows cap icon; "Earned gold capped" note | + +### Interactions with Other Systems + +| System | Direction | Interface | +|--------|-----------|------------| +| **Node System** | Driven by | Shop node type triggers the Shop phase. "Leave" exits → Node System advances to next node choice. | +| **InventoryGenerationComponent** | Reads | `BuildShopGoods(goodsCount=6, runSeed, sequenceIndex)` generates the deterministic goods pool. | +| **PlayerInventoryComponent** | Reads/writes | Gold balance read for affordability; purchase deducts gold; sold items added via `MergeInventory()`. | +| **PlayerInventoryTradeService** | Reads | `TryPurchaseComponent(item, price)` executes purchase. `TrySellItems(itemIds)` executes sales. | +| **Tower Assembly** | Supplies goods to | Purchased components are available for assembly in the Assembly Phase. | +| **RepoForm** | Owns sell UI | RepoForm (not ShopNode) owns the selling interaction. `TrySellItems` is called by RepoForm's UseCase. | + +## Formulas + +### 1. Buy Price (Component) + +`buyPrice = Random.Range(minPrice, maxPrice + 1)` — uniform random integer in `[MinPrice, MaxPrice]` inclusive, rolled at shop generation time and stored in `GoodsItemRawData.Price`. 
+ +**Variables:** + +| Variable | Type | Range | Description | +|----------|------|-------|-------------| +| MinPrice | int | ≥ 0 | Per-rarity floor from `DRShopPrice[row].MinPrice` | +| MaxPrice | int | ≥ MinPrice | Per-rarity ceiling from `DRShopPrice[row].MaxPrice` | + +**Output Range:** `[MinPrice, MaxPrice]` per purchase. + +**Example** — White rarity, `MinPrice=50`, `MaxPrice=150`: `buyPrice ∈ [50, 150]`, uniformly random. + +### 2. Sell Price (Component) + +`sellPrice = Round((minPrice + maxPrice) / 2.0f)` — midpoint, rounded. Called by `ShopPriceRuleService.ResolveComponentSalePrice`. + +**Example** — Blue rarity, `MinPrice=300`, `MaxPrice=600`: `sellPrice = 450`. + +### 3. Sell Price (Tower) + +`towerSellPrice = ResolveComponentSalePrice(muzzleComp) + ResolveComponentSalePrice(bearingComp) + ResolveComponentSalePrice(baseComp)` — sum of all three component sell prices via `ShopPriceRuleService.TryResolveTowerSalePrice`. Tower must be fully assembled and not in the combat roster. + +### 4. Gold Cap + +`effectiveGold = Min(actualGold, MaxPlayerGold)` where `MaxPlayerGold = 9999`. Hard cap applied on every `AddGold` call. Excess gold is discarded. + +## Edge Cases + +- **If `playerGold < itemPrice`**: Buy button is disabled. `TryConsumeGold` returns `false`. No change to gold or inventory. + +- **If player has 0 gold at shop entry**: All buy buttons are disabled. Player may browse and skip. + +- **If all 6 goods are purchased**: Shop shows empty state "All items sold". Player may exit. + +- **If a purchased component is disassembled after purchase**: Component returns to inventory. Shop does not re-offer it — `IsPurchased` is visit-scoped. Disassembled components can be sold via RepoForm. + +- **If `runSeed` or `sequenceIndex` changes between visits**: `BuildShopGoods` produces a different deterministic pool. Revisiting the same node with a different seed generates different goods. 
+ +- **If the same component config appears twice in one run**: Each `GoodsItemRawData` has its own `InstanceId`. Buying twice produces two separate component instances. No deduplication. + +- **If player sells a component shown in the current shop visit**: Shop display is unaffected. `IsPurchased` is visit-scoped. + +- **If a tower being sold has no sellable components**: `TryResolveTowerSalePrice` returns `false`. `FailureReason = MissingTowerComponent`. Tower cannot be sold. + +- **If `MaxPlayerGold` is reached during a reward payout**: `AddGold` silently caps at 9999. Excess gold is discarded. + +- **If player attempts to sell an assembled component via RepoForm**: `FailureReason = AssembledComponent`. Player must disassemble first. + +- **If player attempts to sell a tower in the combat roster via RepoForm**: `FailureReason = ParticipantTower`. Tower must be removed from roster first. + +## Dependencies + +### Upstream Dependencies (what Shop depends on) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Node System** | Hard | Shop node type triggers Shop phase. `runSeed` and `sequenceIndex` passed to `BuildShopGoods`. | GDD exists (`design/gdd/node-system.md`) | +| **InventoryGenerationComponent** | Hard | `BuildShopGoods(goodsCount, runSeed, sequenceIndex)` generates the goods pool. `BuildRandomComponentItem` picks slot type, config, and applies tags. | Implemented | +| **DRShopPrice** | Hard | Price ranges per rarity tier (`MinPrice`, `MaxPrice`). Missing rows cause 0-price fallback. | Implemented | +| **DRMuzzleComp / DRBearingComp / DRBaseComp** | Hard | Component config lookup for shop-offered items. Missing rows cause null returns. | Implemented | + +### Downstream Dependents (what depends on Shop) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Tower Assembly** | Soft | Purchased components become available for assembly. No direct coupling — Tower Assembly reads from inventory. 
| GDD exists | +| **Combat System** | Soft | Gold earned from combat is spent at Shop. Indirect — no direct coupling. | GDD exists | +| **Progression** | Soft | May read total gold spent at shop across runs. Pending Progression GDD. | Not yet designed | +| **RepoForm** | Soft | RepoForm reads `TrySellItems` to process sales. Sell prices derived from `ShopPriceRuleService`. | Implemented | + +### Provisional Assumptions +- `MaxPlayerGold = 9999` is a design constant — pending confirmation from balance tuning. +- Shop node count per run is governed by Node System GDD — Shop GDD assumes at least one shop node appears per run. + +## Tuning Knobs + +| Knob | Default | Safe Range | Extreme: Too Low | Extreme: Too High | +|------|---------|-----------|-----------------|------------------| +| `MaxPlayerGold` | 9999 | 5000–99999 | Gold feels pointless — player never feels rich | Gold never feels scarce — no tension | +| `GoodsCountPerShop` | 6 | 3–12 | Fewer choices — shop feels unrewarding | More choices — decision paralysis | +| `DRShopPrice[rarity].MinPrice` | varies | varies | Cheap high-rarity items — gold becomes abundant | Expensive high-rarity — shop feels futile | +| `DRShopPrice[rarity].MaxPrice` | varies | ≥ MinPrice | High price variance — luck dominates | Low variance — price is predictable but boring | +| Sell price multiplier | 0.5 | 0.3–0.7 | Selling too rewarding — players flip components freely | Selling too punishing — players hoard everything | + +**Data-table-driven knobs**: +- `DRShopPrice.MinPrice / MaxPrice` — per rarity tier, controls both buy and sell prices +- `DRShopPrice.Rarity` — which rarity tiers are available in the shop + +## Visual/Audio Requirements + +### VFX Event Specifications + +All shop VFX is **localized to relevant cards and the gold display** — no full-screen flashes or global pulses. The shop is a tactical pause, not a cinematic. 
Every effect should reinforce the geometric/mathematical aesthetic and rarity hierarchy established in the Art Bible. + +#### Rarity Shimmer Specification (Purple / Red) + +Purple and Red rarity cards carry a persistent shimmer during the entire shop visit: + +| Rarity | Shimmer Behavior | Shape | +|--------|----------------|-------| +| Purple `#C084FC` | Continuous horizontal sweep, 60% opacity, 2-second cycle, ease-in-out | Thin diamond outline traveling across card border | +| Red `#F87171` | Continuous horizontal sweep, 70% opacity, 1.5-second cycle, ease-in-out | Thin diamond outline + 2 small triangle particles orbiting card corners | + +White through Blue cards have no shimmer — their rarity is communicated through border color and particle density on purchase only. + +--- + +### Event: Shop Open / Card Cascade + +**Trigger**: Player enters a Shop node; shop form appears. + +**Sequence**: +1. **Background pulse** (t=0): A single hexagonal ring expands from screen center to full shop panel bounds, opacity 20%, rarity-neutral white `#E8E8E8`, 250ms ease-out. Signals "shop materialized." +2. **Gold display appears** (t=50ms): Gold counter scales in from 0.8x to 1.0x, 200ms ease-out. +3. **Card cascade** (t=100ms–700ms): 6 component cards enter sequentially, 100ms apart. Each card: scale 0.7x → 1.0x, opacity 0 → 1, 250ms ease-out per card. Stagger: card 1 at t=100ms, card 6 at t=600ms. +4. **Rarity shimmer starts** (t=700ms): Purple/Red cards begin shimmer loop immediately upon entering. No shimmer during card-in animation. + +**Particle spec for card cascade**: On each card's entry, 3 small triangles burst from the card's bottom edge, rarity-colored, 200ms lifetime, fade out. Originates from card center-bottom. This reinforces the geometric theme without being distracting. + +**Audio**: Soft two-tone chord (C3-G3, 100ms each). No melody — the shop should feel like a calm briefing, not a reward screen. + +**Duration**: ~800ms total. 
+ +--- + +### Event: Card Hover — Affordable + +**Trigger**: Player cursor enters an affordable component card. + +**Sequence**: +1. **Card lift** (t=0): Card translates Y+8px, 150ms ease-out. Shadow deepens (Y offset increases, blur expands, opacity 0.15 → 0.25). +2. **Border glow** (t=0): Card border brightens to full rarity color at 90% opacity, 150ms. +3. **Type icon pulse** (t=0): Component type icon (Muzzle/Bearing/Base geometric symbol) scales 1.0x → 1.15x → 1.0x, 300ms ease-out. +4. **Description reveal** (t=100ms): Description text fades in if previously truncated, 200ms ease-out. Name and tags remain visible always. + +**Particle spec**: 4 tiny triangles emit from card corners (one per corner), rarity-colored, 150ms lifetime, outward drift 20px, fade out. Static emit — no continuous particle stream. + +**Audio**: Subtle tick/chirp (C5, 40ms, volume 0.3). Very quiet — the player should barely notice it consciously. + +**Duration**: Hover effects play while cursor is over card; reverse animation on cursor exit (150ms ease-out, no particle burst on exit). + +--- + +### Event: Card Hover — Cannot Afford + +**Trigger**: Player cursor enters a component card whose price exceeds current gold. + +**Sequence**: +1. **Card tint** (t=0): Card background dims to 60% opacity, 150ms. Full-color border remains visible so rarity is still readable. +2. **Price badge shake** (t=0): Buy-price badge shakes horizontally — triangle waveform: `+3px → -3px → +3px → 0`, 300ms. Signals "I see the price but I can't." +3. **Gold display flash** (t=100ms): Gold display border flashes red `#F87171` at 40% opacity, 200ms, then returns to normal. +4. **No lift**: Card does NOT translate up. It stays in place, visually communicating "not interactable." +5. **Rarity shimmer continues**: Purple/Red shimmer plays normally — affordability state does not suppress shimmer. + +**Particle spec**: None on hover (cannot afford = absence, not presence). 
+ +**Audio**: Low dissonant thud (C2, 60ms, volume 0.4). Subconsciously communicates rejection without being alarming. + +**Duration**: Persists while cursor is over card; reverses on exit (150ms ease-out). + +--- + +### Event: Purchase — Click and Confirm + +**Trigger**: Player clicks an affordable component card's Buy button. + +**Sequence**: +1. **Click confirmation** (t=0): Buy button scales 1.0x → 0.92x → 1.0x, 100ms (press feel). +2. **Gold deduction** (t=80ms): Gold counter does a rapid countdown animation — numbers tick down rapidly (50ms per digit-change), final value reached at t=200ms. No bounce or overshoot. +3. **Particle burst** (t=80ms): 8–12 geometric particles (triangles/diamonds mixed) burst from the card's center, rarity-colored. Particles: random velocity 80–200px/s, 400ms lifetime, fade out over last 150ms. Burst is roughly circular but shaped by triangle/diamond outlines at edges. +4. **Rarity shimmer stop** (t=80ms): Purple/Red shimmer on this card ceases immediately — the card is now "spoken for." +5. **Card fade-out** (t=300ms): The purchased card fades to 0% opacity and scales to 0.85x over 250ms ease-out. Remaining cards do NOT shift to fill the gap — the empty slot remains as a visual record of purchase. +6. **Inventory indicator** (t=400ms): A small rarity-colored diamond icon pulses once near the inventory/accessory panel, 200ms ease-out. Signals "this component is now yours." 
+ +**Particle spec (rarity-weighted)**: +| Rarity | Particle Count | Extra | +|--------|--------------|-------| +| White | 8 | — | +| Green | 8 | — | +| Blue | 10 | — | +| Purple | 10 | + shimmer trace line connecting 3 particles (diamond outline, 60% opacity, 300ms) | +| Red | 12 | + shimmer trace line connecting 4 particles (diamond outline, 70% opacity, 250ms) | + +**Audio**: Ascending arpeggio keyed to rarity: +| Rarity | Sound | +|--------|-------| +| White | C4-E4, 80ms total | +| Green | C4-E4-G4, 120ms total | +| Blue | C4-E4-G4-C5, 160ms total | +| Purple | C4-E4-G4-C5-E5 + shimmer overtone, 200ms total | +| Red | C4-E4-G4-C5-E5-G5 + shimmer overtone, 240ms total | + +Volume: 0.7. Pitch-shifted slightly higher than tower assembly sounds — shop purchases feel like a personal acquisition, not a construction event. + +**Duration**: ~450ms total. Player can continue shopping during this sequence. + +--- + +### Event: All 6 Items Purchased / Empty State + +**Trigger**: Final card is purchased; no items remain. + +**Sequence**: +1. **Empty state fade-in** (t=0): "All items sold" message fades in at center of card grid, 300ms ease-out. Background behind message: `#1A1A2E` at 60% opacity, geometric diamond shape as backdrop. +2. **Geometric pulse** (t=100ms): A large diamond outline pulses once (scale 0.9x → 1.1x → 1.0x, 400ms ease-out) behind the message. +3. **Gold display remains visible**: Player can review their remaining gold. + +**Audio**: Single resonant tone (G3, 400ms, volume 0.5, soft decay). Signals conclusion without urgency. + +--- + +### Event: Shop Leave / Exit + +**Trigger**: Player clicks "Leave" button or all items purchased. + +**Sequence**: +1. **Leave button click** (t=0): Button press animation (scale 1.0x → 0.95x → 1.0x, 80ms). +2. **Card fade-out** (t=100ms): All remaining cards fade to 0% opacity, scale to 0.9x, staggered 50ms apart (back-to-front order), 200ms each ease-out. +3. 
**Gold display fade** (t=300ms): Gold counter fades out, 200ms ease-out. +4. **Hexagonal ring collapse** (t=350ms): Single hexagonal ring contracts from panel edges to center, opacity 15%, 200ms ease-out. Geometric "door closing." +5. **Panel exit** (t=500ms): Shop panel slides out or fades, standard node-system transition. + +**Audio**: Reverse of shop-open chord (G3-C3, 150ms total). Clean, no reverb. + +**Duration**: ~600ms total. + +--- + +### Event: Gold Display — At Cap + +**Trigger**: Player gold equals `MaxPlayerGold` (9999). + +**Visual**: Gold display shows a cap icon (small filled hexagon) beside the gold number. Icon pulses once every 3 seconds — scale 1.0x → 1.2x → 1.0x, 500ms ease-out. Color: `#F87171` (red, warning) at 60% opacity. + +**Audio**: No sound on cap indicator pulse. The cap warning is purely visual. + +--- + +### Event: Sell Interaction (RepoForm) + +> Selling is owned by RepoForm, not ShopNode. These effects apply when the player sells from the inventory view, not during a shop visit. They are documented here for VFX consistency. + +**Sell confirm click**: A geometric diamond splits into two triangles that fly toward the gold display. Gold display increments with rarity-keyed arpeggio (same as purchase, one octave lower: C3-E3-G3 for Red). + +**Sell price reveal**: When hovering over a sellable item in RepoForm, a small triangle pointer indicates the sell price (midpoint), color `#4ADE80` (green = positive return). On click: green particle burst, gold counter ticks up. 
 + +--- + +### Rarity Color Palette — Shop Application + +| Rarity | Hex | Card Border | Shimmer | Purchase Particle | Purchase Audio | +|--------|-----|-------------|---------|-------------------|----------------| +| White | `#E8E8E8` | `#E8E8E8` solid | None | 8 white triangles | C4-E4, 80ms | +| Green | `#4ADE80` | `#4ADE80` solid | None | 8 green triangles | C4-E4-G4, 120ms | +| Blue | `#60A5FA` | `#60A5FA` solid | None | 10 blue diamonds | C4-E4-G4-C5, 160ms | +| Purple | `#C084FC` | `#C084FC` solid | Diamond sweep, 2s cycle | 10 purple diamonds + shimmer trace | C4-E4-G4-C5-E5 + overtone, 200ms | +| Red | `#F87171` | `#F87171` solid | Diamond sweep + corner triangles, 1.5s cycle | 12 red diamonds + shimmer trace | C4-E4-G4-C5-E5-G5 + overtone, 240ms | + +--- + +### Animation & Style Constraints + +**Shape vocabulary**: Triangles, diamonds, hexagons ONLY. No circles, no curves, no organic forms. Every particle, every icon, every UI border uses one of these three. + +**Waveform character**: All motion uses clean triangle or sine waveforms — no exponential easing, no elastic overshoot, no bounce. Duration: 80–250ms for micro-interactions, up to 500ms for structural transitions. + +**Easing rule**: Ease-out for all entry animations. Linear for countdown/countup number animations (gold ticks). No ease-in-only — nothing should feel like it is "warming up." + +**Rarity shimmer constraints**: +- Sweep direction: left-to-right, continuous loop +- Particle size: 4–8px (small, not dominant) +- Shimmer opacity: never exceeds 70% — rarity is communicated by shimmer quality, not intensity +- Corner triangles for Red: orbit path is a small equilateral triangle 16px from each corner + +**Shop-form layout**: 6 cards in a 3-column × 2-row grid. Card aspect ratio: 3:4 (portrait). Card border radius: 0 (sharp corners — no rounded corners, consistent with geometric aesthetic). Cards must have 8px gaps minimum. 
+ +**No full-screen effects**: Shop VFX is card-scoped or gold-display-scoped. A full-screen flash or color wash is never appropriate for a shop event. The exception is the hexagonal ring on shop open/leave, which is a border-to-border panel effect, not full-screen. + +**Particle lifecycle**: All particles fade over their final 30% of lifetime. No particles simply disappear (pop out of existence). Particle count per burst: max 12. Particle lifetime: 200–400ms. + +**Performance budget**: At most 3 simultaneous particle emitters active in the shop (card cascade burst, purchase burst, empty state pulse). Do not stack more. + +--- + +### DataTable Extension — Shop Sounds + +| SoundId | AssetName | Volume | Duration | Notes | +|---------|-----------|--------|----------|-------| +| ShopOpen | Shop_Open | 0.5 | 200ms | C3-G3 chord, no reverb | +| ShopCardHover | Shop_Card_Hover | 0.3 | 40ms | C5 tick, very quiet | +| ShopCardHover_CannotAfford | Shop_Card_Hover_Deny | 0.4 | 60ms | C2 thud, dissonant | +| ShopPurchase_White | Shop_Purchase_White | 0.7 | 80ms | C4-E4 | +| ShopPurchase_Green | Shop_Purchase_Green | 0.7 | 120ms | C4-E4-G4 | +| ShopPurchase_Blue | Shop_Purchase_Blue | 0.7 | 160ms | C4-E4-G4-C5 | +| ShopPurchase_Purple | Shop_Purchase_Purple | 0.7 | 200ms | C4-E4-G4-C5-E5 + shimmer | +| ShopPurchase_Red | Shop_Purchase_Red | 0.7 | 240ms | C4-E4-G4-C5-E5-G5 + shimmer | +| ShopEmpty | Shop_Empty | 0.5 | 400ms | G3, soft decay | +| ShopLeave | Shop_Leave | 0.5 | 150ms | G3-C3 reverse chord | +| GoldAtCap_Pulse | Gold_AtCap_Pulse | 0.0 | — | No audio — visual only | +| RepoForm_SellConfirm | RepoForm_Sell_Confirm | 0.7 | 200ms | C3-E3-G3 arpeggio (purchase sounds one octave lower) | + +--- + +## UI Requirements + +### Shop Form Layout + +**Trigger**: Shop node type in Node System triggers this form. 
+ +**Gold Display** (top of form): +- Shows current gold as a numeric value with a small hexagon coin icon +- Position: top-center of shop panel, horizontally centered +- Size: enough for 4-digit display (up to "9999") +- States: normal (white text `#E8E8E8`), at-cap (red cap icon pulses) +- Animation: gold value counts up/down with rarity-keyed arpeggio on change + +**Component Cards** (center of form): +- 6 cards in 3×2 grid +- Each card displays: component type icon (geometric symbol), name, rarity border (full-color), Tags (small icons), description (2-line max), buy price badge +- Card layout order: cards arranged left-to-right, top-to-bottom (1-2-3 / 4-5-6) +- Cards do NOT reorder after purchase — empty slots remain visible + +**Card Anatomy** (per card): +``` +[ Rarity Border (2px solid, full rarity color) ] +[ Type Icon (geometric symbol, 32x32) | Name (localized) ] +[ Rarity Label (text, rarity color) ] +[ Tag Icons (row of small geometric tag markers) ] +[ Description (2 lines max, truncated with "..." 
if needed) ] +[ Price Badge: [Gold Icon] [Price Number] [BUY] ] +``` + +**Buy Button**: +- Integrated into card bottom row +- States: Enabled (rarity-colored, pointer cursor), Disabled/cannot-afford (40% opacity, no-shader hover effect) +- No tooltip — all information is on the card itself + +**Leave Button**: +- Position: bottom-right of shop panel +- Label: "LEAVE" or equivalent localized string +- Style: outlined button, white border `#E8E8E8`, no fill +- Hover: border brightens, 150ms + +**Empty State** (all purchased): +- Geometric diamond shape as backdrop +- "All items sold" centered in card grid area +- Leave button remains accessible + +### Accessibility + +- All rarity colors are paired with distinct geometric symbols (triangle=Muzzle, diamond=Bearing, hexagon=Base) — color-blind safe +- Price badges use absolute number display, not icon counts +- Cannot-afford state uses border shake (triangle waveform) + red tint, not color alone +- All interactions have keyboard equivalents: Tab to navigate cards, Enter to purchase, Escape to leave +- Gold counter updates are announced via UIFocus system (not audio-only) + +### Component Type Symbols + +| Type | Geometric Symbol | Shape | +|------|-----------------|-------| +| Muzzle | Triangle | Equilateral triangle, pointing up | +| Bearing | Diamond | Rhombus / rotated square | +| Base | Hexagon | Regular hexagon | + +These symbols appear as: card type icon, hover type icon pulse, particle shape origin. They are the primary shape vocabulary of the game. 
+ +### Animation Timing Reference + +| Event | Entry Duration | Exit Duration | Notes | +|-------|---------------|---------------|-------| +| Shop Open | 800ms total | 600ms total | — | +| Card Cascade | 250ms per card, 100ms stagger | 200ms per card, 50ms stagger | — | +| Card Hover (affordable) | 150ms | 150ms | — | +| Card Hover (cannot afford) | 150ms | 150ms | No lift; price shake instead | +| Purchase Burst | 400ms particles | — | Card fade 250ms concurrent | +| Gold Countdown | ~150ms (rate: 50ms/digit) | — | Linear, not ease-out | +| Empty State | 400ms | — | — | +| Leave | 500ms total | — | — | +| Rarity Shimmer (Purple) | 2000ms cycle | — | Continuous while card visible | +| Rarity Shimmer (Red) | 1500ms cycle | — | Continuous while card visible | + +## Acceptance Criteria + +### Shop Visit +- **Given** the player enters a Shop node, **then** 6 component cards cascade into view in 3x2 grid with staggered entry animation (card 1 at t=100ms, card 6 at t=600ms). +- **Given** the player enters a Shop node, **then** the hexagonal ring expand animation plays once, opacity 20%, 250ms ease-out. +- **Given** Purple or Red rarity cards are in the shop, **then** the rarity shimmer loop runs continuously until the card is purchased or the player leaves. + +### Card Interaction +- **Given** the player hovers over an affordable component card, **then** the card lifts Y+8px with shadow deepening, border glows full rarity color, and 4 corner triangles emit. +- **Given** the player hovers over a cannot-afford card, **then** the card dims to 60% opacity, price badge shakes (triangle waveform), and gold display border flashes red. +- **Given** the player clicks the Buy button on an affordable card, **then** the purchase burst plays (rarity-keyed particle count), gold counter ticks down, and the card fades to empty slot. +- **Given** the player clicks Buy with insufficient gold, **then** the buy button does not activate, gold display shakes, and no purchase occurs. 
 + +### Rarity Shimmer +- **Given** a Purple card is visible in the shop, **then** a diamond outline sweeps left-to-right across the card border every 2 seconds, 60% opacity, ease-in-out. +- **Given** a Red card is visible in the shop, **then** a diamond outline sweeps left-to-right every 1.5 seconds (70% opacity) and 2 small triangles orbit the card corners simultaneously. + +### Gold Display +- **Given** the player purchases a component, **then** the gold counter counts down rapidly (linear, ~50ms per digit change) to the new value. +- **Given** the player reaches `MaxPlayerGold` (9999), **then** a red hexagon cap icon pulses once every 3 seconds beside the gold display. + +### Empty State +- **Given** all 6 shop items are purchased, **then** the empty state appears with a diamond backdrop shape and "All items sold" message. +- **Given** the empty state is shown, **then** the Leave button remains accessible and functional. + +### Shop Exit +- **Given** the player clicks Leave or all items are purchased, **then** remaining cards fade out staggered, gold display fades, and the hexagonal ring collapses to center (500ms total). + +### Audio +- **Given** the shop opens, **then** a C3-G3 chord plays (200ms, volume 0.5). +- **Given** a component is purchased, **then** an ascending arpeggio plays keyed to rarity (White=2-note 80ms, Red=6-note 240ms + shimmer overtone). +- **Given** a cannot-afford card is hovered, **then** a low dissonant C2 thud plays (60ms, volume 0.4). +- **Given** the player leaves the shop, **then** a reverse G3-C3 chord plays (150ms). + +### Accessibility +- **Given** all rarity colors are displayed, **then** each rarity also has a distinct geometric symbol (Muzzle=triangle, Bearing=diamond, Base=hexagon) visible on every card. +- **Given** the player navigates by keyboard, **then** Tab cycles through cards, Enter purchases, Escape leaves. 
 + +- **Given** a color-blind player views the shop, **then** the cannot-afford state is communicated via price badge shake + dim, not color alone. + +## Open Questions + +### 1. Shop Sell Multiplier +**Status**: OPEN — The sell price formula uses midpoint (`Round((min+max)/2)`), which equals the expected buy price (buy prices are rolled uniformly in `[MinPrice, MaxPrice]`, whose mean is the midpoint) — the average resale return rate is therefore effectively ~100%, not a discount. Should there be an explicit sell multiplier (e.g., `Round(midpoint * sellMultiplier)`)? Without it, the effective return rate is implicit. Adding an explicit multiplier makes it a tunable knob (see Tuning Knobs). + +### 2. Shop Node Frequency per Run +**Status**: OPEN — How many Shop nodes appear per run is governed by the Node System GDD. Shop GDD needs this to calibrate `MaxPlayerGold` against expected gold income. Minimum recommended: 2 shop nodes per run to create meaningful save-vs-spend tension. + +### 3. Duplicate Component Exclusion Across Shop Visits +**Status**: OPEN — Design decision: once a component config is purchased, it should not appear in subsequent shop visits this run. `BuildShopGoods` does not currently track purchased configs. This exclusion logic needs to be added: either as a filter in `ShopGoodsBuilder` or as a parameter passed to `BuildShopGoods`. **This is an implementation gap — the GDD specifies the behavior but the code does not yet implement it.** + +### 4. Minimum Purchase Requirement +**Status**: OUT OF SCOPE — Not adopted. Design chose optional visits (SR5). Revisiting this would create a mandatory gold sink but risks feeling punitive on early runs with bad RNG. 
 + diff --git a/design/gdd/systems-index.md b/design/gdd/systems-index.md new file mode 100644 index 0000000..2ca090a --- /dev/null +++ b/design/gdd/systems-index.md @@ -0,0 +1,37 @@ +# Systems Index + +> **Last Updated**: 2026-04-29 + +## Index + +| Priority | System | Layer | Category | Status | Design Doc | +|---------|--------|-------|----------|--------|------------| +| 1 | Node System (节点系统) | Game Flow | Level/World | In Review | `design/gdd/node-system.md` | +| 2 | Combat System (战斗系统) | Gameplay | Combat | Designed | `docs/CombatNodeArchitecture.md` | +| 3 | Tower Assembly (塔组装系统) | Gameplay | Economy | Needs Revision | `design/gdd/tower-assembly.md` | +| 4 | Shop System (商店系统) | Gameplay | Economy | Designed | `design/gdd/shop.md` | +| 5 | Event System (事件系统) | Gameplay | Narrative | Designed | `design/gdd/event-system.md` | +| 6 | Progression (成长系统) | Meta | Progression | Needs Revision | `design/gdd/progression.md` | + +## Progress Tracker + +- **Total Systems**: 6 +- **Designed**: 3 (Shop, Event, Combat) +- **Needs Revision**: 2 (Tower Assembly, Progression); **In Review**: 1 (Node System) + +## Layer Definitions + +| Layer | Description | +|-------|-------------| +| Meta | Outer loop (progression, unlocks) | +| Game Flow | Navigation, state machine, save/load | +| Gameplay | Core game mechanics | +| Foundation | Engine integration, services | + +## Category Definitions + +- **Combat**: Damage, health, enemy AI, tower behavior +- **Economy**: Currency, shop, crafting, loot +- **Level/World**: Maps, nodes, terrain, world state +- **Progression**: XP, levels, unlocks, meta progression +- **Narrative**: Events, dialogue, story beats diff --git a/design/gdd/tower-assembly.md b/design/gdd/tower-assembly.md new file mode 100644 index 0000000..d7e39b4 --- /dev/null +++ b/design/gdd/tower-assembly.md @@ -0,0 +1,367 @@ +# Tower Assembly (塔组装系统) + +> **Status**: Designed +> **Author**: SepComet + agents +> **Last Updated**: 2026-04-29 +> **Implements Pillar**: Tactical preparation 
and adaptation — players optimize tower builds between combat encounters + +## Overview + +The Tower Assembly system is the **component combination engine** that transforms individual Muzzle, Bearing, and Base components into functional Tower instances. During Assembly Phase (between combat nodes), the `PlayerInventoryTowerAssemblyService.TryAssembleTower()` method accepts three component instance IDs and produces a `TowerItemData` containing aggregated stats across 5 level tiers. The system resolves tower rarity from component rarities, computes per-level stat arrays from rarity-scaled base values plus data-table-defined per-level deltas, and merges Tags from all constituent components. Assembled towers can be rostered for combat (max 4 active) via the `PlayerInventoryTowerRosterService`. The system operates purely on in-memory inventory state; all data is ephemeral per run. + +## Player Fantasy + +**"Every battle is a puzzle. You have the pieces—arrange them to solve it."** + +The Tower Assembly delivers the fantasy of **tactical threat assessment and counter-build satisfaction**. The player arrives at Assembly Phase knowing exactly what challenges lie ahead (the 2 outgoing node types are visible), and must decide which components to combine into towers that answer those specific threats. The core feeling is **the satisfaction of feeling clever** — not raw power, but the right tool for the known job. + +The player should feel: +- **Analyzing incoming threats** — the next node types are known; the question is "what does this enemy fear?" +- **Making irreversible commitments** — once components are assembled into a tower, they cannot be reclaimed until the tower is disassembled or the run ends. Every build decision is permanent within its context. 
+- **Seeing the consequences of their choices** — a well-matched tower against the right enemy type is viscerally effective; a mismatched build feels wrong and the player knows exactly what they could have done differently. +- **Building toward the boss** — each assembly decision accumulates toward the final confrontation; the boss fight is where build quality is ultimately tested. + +**Reference**: Into the Breach's "I can see exactly what will happen and I planned for it" feeling — the Tower Assembly achieves this through visible upcoming threats during assembly, and transparent tower stats that make the outcome predictable. + +## Detailed Design + +### Core Rules + +**Tower Assembly** combines exactly one Muzzle, one Bearing, and one Base component into a Tower instance. + +**R1. Assembly Eligibility**: A component is eligible for assembly if and only if: +- It exists in the player's inventory (identified by `InstanceId`) +- Its `IsAssembledIntoTower` flag is `false` + +**R2. Assembly Process** (`PlayerInventoryTowerAssemblyService.TryAssembleTower`): +1. Player selects one Muzzle, one Bearing, and one Base component from inventory +2. System validates all three components are eligible (R1) +3. System looks up per-level delta values from data tables (`DRMuzzleComp.AttackDamagePerLevel`, `DRBearingComp.RotateSpeedPerLevel`/`AttackRangePerLevel`, `DRBaseComp.AttackSpeedPerLevel`) +4. Tower rarity computed via `InventoryRarityRuleService.ResolveTowerRarity(muzzleRarity, bearingRarity, baseRarity)` — arithmetic mean of three rarities, rounded and clamped to `RarityType` enum range +5. Stat arrays built at `TowerLevelCount = 5` granularity using rarity-scaled base values plus per-level deltas +6. Tags aggregated via `TowerTagAggregationService.AggregateTowerTags()` — stack counts merged across all three components +7. New `TowerItemData` created with a system-allocated `InstanceId`, display name, and the computed stats +8. 
All three components' IsAssembledIntoTower flags are set to `true` +9. The new tower is added to `inventory.Towers` + +**R3. Disassembling**: Any assembled tower may be disassembled at no cost, except a tower containing a 0-endurance component, which must be repaired first (see R6). Disassembly: +- Removes the tower from `inventory.Towers` +- Sets all three constituent components' `IsAssembledIntoTower` flags to `false` +- Components' `Endurance` values are preserved (not reset) +- If the tower is currently in the combat roster (`ParticipantTowerInstanceIds`), it is automatically removed from the roster before disassembling + +**R4. Tower Roster** (`PlayerInventoryTowerRosterService`): +- Maximum 4 towers may be rostered for combat (`MaxParticipantTowerCount`) +- Roster is set during Assembly Phase via `TryAddParticipantTower(towerInstanceId)` / `TryRemoveParticipantTower(towerInstanceId)` +- Only rostered towers participate in combat +- Roster state persists in `inventory.ParticipantTowerInstanceIds` and is reset on each new run + +**R5. Tower Endurance**: Each component has an `Endurance` value (0–100). When a tower participates in combat, all three components' endurance is reduced proportionally via `InventoryTowerEnduranceUtility.ReduceTowerEndurance()`. + +**R6. Endurance at Zero**: If any constituent component's `Endurance` reaches 0: +- The tower **cannot be rostered** for combat (`TryAddParticipantTower` returns failure) +- The tower **cannot be used** in combat — it is treated as non-functional +- The components retain their 0 endurance state until repaired or the run ends +- A 0-endurance component cannot be disassembled (must be repaired first — repair is out of scope for this GDD, see Open Questions) + +**R7. No Component Compatibility Constraints**: Any MuzzleCompItemData may combine with any BearingCompItemData and any BaseCompItemData. No affinity rules, type matching, or stat constraints are enforced. `AttackMethodType` and `AttackPropertyType` are independent dimensions that do not affect assembly eligibility.
+ +### States and Transitions + +Tower Assembly has no standalone state machine — it operates as a service within the `PlayerInventoryComponent`. State transitions are driven by the Node System's Assembly Phase. + +**Component States** (per component instance): + +| State | Description | Exits | +|-------|-------------|-------| +| `InInventory` | Component resides in inventory, not assembled | → `Assembled` on successful `TryAssembleTower` | +| `Assembled` | Component is part of a tower, `IsAssembledIntoTower = true` | → `InInventory` on successful `TryDisassembleTower`; → `InRoster` when tower is added to roster | +| `InRoster` | Tower containing this component is in the combat roster | → `Assembled` when tower removed from roster | +| `Degraded` | Any constituent component has `Endurance = 0`; tower cannot be rostered | → `Assembled` if endurance is repaired (out of scope) | + +**Tower States** (per tower instance): + +| State | Description | Exits | +|-------|-------------|-------| +| `AssembledIdle` | Tower exists in `inventory.Towers`, not in combat roster | → `AssembledRostered` on `TryAddParticipantTower` | +| `AssembledRostered` | Tower is in `ParticipantTowerInstanceIds`, eligible for combat | → `AssembledIdle` on `TryRemoveParticipantTower`; → `Degraded` if any component reaches 0 endurance | +| `Degraded` | Tower cannot be rostered due to 0-endurance component | → `AssembledRostered` if endurance is repaired (out of scope) | + +### Interactions with Other Systems + +| System | Direction | Interface | +|--------|-----------|------------| +| **Node System** | Driven by | Assembly Phase is triggered by `NodeSystem` after each node resolves. During Assembly Phase, the player may call `TryAssembleTower`, `TryDisassembleTower`, `TryAddParticipantTower`, and `TryRemoveParticipantTower`. | +| **PlayerInventoryComponent** | Reads/writes | Tower Assembly operates entirely through `PlayerInventoryComponent`'s inventory state. 
Components and towers are stored in `BackpackInventoryData`. | +| **Combat System** | Delegates to | `PlayerInventoryComponent.GetParticipantTowerSnapshot()` returns the current combat roster (up to 4 towers). Combat system reads tower stats from this snapshot. | +| **Progression** | Writes | On `RunEnd`, final tower configurations are not persisted (run resets). Progression may read aggregate stats (e.g., total towers assembled across all runs) — pending Progression GDD. | + +## Formulas + +### 1. Tower Stat Per-Level Scaling + +Each tower stat (AttackDamage, RotateSpeed, AttackRange, AttackSpeed) is built as a 5-element array via `BuildLevelIntArray` or `BuildLevelFloatArray`. All stats follow the same structural formula: + +`statValue[i] = baseValue + perLevel * i` for `i` in `0..4` + +**Variables:** + +| Variable | Symbol | Type | Range | Description | +|----------|--------|------|-------|-------------| +| rarityBaseArray | B | int[5] or float[5] | varies | Per-rarity base values indexed by rarity (White=0..Red=4) | +| rarity | R | RarityType | White..Red | Component's rarity, converted to 0-based index | +| rarityIndex | ri | int | 0–4 | `Clamp((int)R - 1, 0, 4)` | +| baseValue | B[ri] | int or float | varies | Starting value at level 0, selected by rarity | +| perLevel | P | int or float | varies | Per-level increment from data table | +| statValue[i] | V_i | int or float | varies | Stat value at level i (level = i + 1) | + +**Output Range:** Level 1 value = `B[ri]`; Level 5 value = `B[ri] + P * 4`. Actual range depends on component data tables. + +**Example — AttackDamage** (Muzzle, Green rarity, `perLevel=3`, `AttackDamage = [10, 20, 30, 40, 50]`): + +| Level | Index | Value | +|-------|-------|-------| +| 1 | 0 | 20 + 3×0 = **20** | +| 2 | 1 | 20 + 3×1 = **23** | +| 3 | 2 | 20 + 3×2 = **26** | +| 4 | 3 | 20 + 3×3 = **29** | +| 5 | 4 | 20 + 3×4 = **32** | + +--- + +### 2. 
Tower Rarity Resolution + +`InventoryRarityRuleService.ResolveTowerRarity(muzzleRarity, bearingRarity, baseRarity)` resolves tower rarity as the arithmetic mean of the three constituent component rarities, rounded and clamped: + +`towerRarity = Clamp(Round((mR + bR + baseR) / 3), White, Red)` + +**Variables:** + +| Variable | Symbol | Type | Range | Description | +|----------|--------|------|-------|-------------| +| muzzleRarity | mR | RarityType | White..Red | Muzzle component rarity | +| bearingRarity | bR | RarityType | White..Red | Bearing component rarity | +| baseRarity | baseR | RarityType | White..Red | Base component rarity | +| normalized | n | int | 0–4 | (int)clampedRarity - 1 (0-based index) | +| average | avg | float | 1–5 | (mR + bR + baseR) / 3f | +| rounded | rnd | int | 1–5 | Math.Round(average) | +| towerRarity | — | RarityType | White..Red | Final tower rarity | + +**Output Range:** White to Red. Because the average of three integer rarities is always a multiple of ⅓, exact .5 ties never occur: averages of x.33 round down and x.67 round up (e.g. 1.67–2.33 → Green; 2.67–3.33 → Blue). + +**Example** — Muzzle=Green(2), Bearing=Blue(3), Base=Purple(4): `(2+3+4)/3 = 3.0` → **Blue** + +--- + +### 3. Tag Aggregation + +`TowerTagAggregationService.AggregateTowerTags(muzzleTags, bearingTags, baseTags)` merges tags from all three components into `TagRuntimeData[]`: + +**Variables:** + +| Variable | Type | Description | +|----------|------|-------------| +| componentTags | `TagType[][]` | Tags array from each of the 3 components | +| stackByTag | `Dictionary` | Running count of tag occurrences | +| TotalStack | int | `Max(1, occurrenceCount)` per tag — guaranteed at least 1 | + +**Output:** `TagRuntimeData[]` sorted by `TagType`. `TotalStack` represents how many of the 3 components carry this tag. Flat unique tag list (`Tags[]`) is derived by `FlattenUniqueTags(TagRuntimeData[])`.
+ +**Example** — Muzzle=[Fire], Bearing=[Ice], Base=[Fire]: +`{ Fire: 2, Ice: 1 }` → `[{ Fire, TotalStack=2 }, { Ice, TotalStack=1 }]` + +## Edge Cases + +- **If the same component instance ID is passed for multiple slots**: Assembly fails. `TryGetComponentById` searches type-specific lists, so the second slot lookup fails and returns `false`. + +- **If any component is already assembled into a tower** (`IsAssembledIntoTower = true`): Assembly fails immediately. Components may only belong to one tower at a time. + +- **If any component's DR config row is missing** (`DRMuzzleComp`, `DRBearingComp`, or `DRBaseComp` returns `null` for the component's `ConfigId`): Assembly fails. Components without valid data table entries cannot be assembled. + +- **If all three components have duplicate tags** (e.g., all have `TagType.Fire`): `TagRuntimeData` is produced with `TotalStack = 3`. Stack count equals the number of components carrying that tag (max 3). + +- **If one or more components have empty or null tags arrays**: `AggregateTowerTags` skips null/empty arrays. The resulting tower only has tags from components with non-empty arrays. + +- **If all three components have empty/null tags**: Tower has no tags. `AggregateTowerTags` returns `Array.Empty()`. + +- **If tags contain `TagType.None` or invalid enum values**: These are filtered out by `AggregateTowerTags` via `if (tagType == TagType.None || !Enum.IsDefined(typeof(TagType), tagType)) continue;`. Invalid tags do not appear in output. + +- **If the combat roster is full (4 towers) and user attempts to add another**: `TryAddParticipantTower` returns `ParticipantTowerAssignFailureReason.ParticipantAreaFull`. The tower is not added. + +- **If a tower with 0-endurance component is attempted to be rostered**: `CombatParticipantTowerValidationService.ValidateTower` returns a validation failure (`BrokenMuzzleComponent`/`BrokenBearingComponent`/`BrokenBaseComponent`). 
`TryAddParticipantTower` returns `ParticipantTowerAssignResult` with `FailureReason = InvalidTower`. The tower cannot participate. + +- **If a tower in the combat roster has a component reach 0 endurance mid-combat**: The tower remains in `ParticipantTowerInstanceIds` but becomes degraded. `CombatParticipantTowerValidationService.ValidateParticipantTowers` marks it invalid on the next validation. No automatic removal from roster occurs. + +- **If a tower's component stat arrays are shorter than 5 elements**: `ResolveRarityBaseValue` uses `Clamp(rarityIndex, 0, array.Length - 1)`. If the array is empty, the stat defaults to 0. + +- **If per-level delta is negative** (e.g., `AttackSpeedPerLevel = -0.25`): The formula `baseValue + perLevel * i` correctly handles negative values. Level 5 stat will be lower than Level 1 stat for that dimension. + +- **If a tower's rarity resolves to a boundary value** (e.g., `(Green + Blue + Purple) / 3 = 3.0`): `Mathf.RoundToInt(3.0f) = 3` (Blue). Standard rounding applies. + +## Dependencies + +### Upstream Dependencies (what Tower Assembly depends on) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Node System** | Hard | Assembly Phase triggers Tower Assembly. `PlayerInventoryComponent.TryAssembleTower` and roster management are called during Assembly Phase. | GDD exists (`design/gdd/node-system.md`) | +| **Inventory** | Hard | `PlayerInventoryComponent` owns all component and tower state. Assembly reads/writes via `BackpackInventoryData`. | Implemented | +| **DataTable (DRMuzzleComp, DRBearingComp, DRBaseComp)** | Hard | Per-level delta lookups during stat building. Missing rows cause assembly failure. | Implemented | + +### Downstream Dependents (what depends on Tower Assembly) + +| System | Type | Interface | Status | +|--------|------|-----------|--------| +| **Node System** | Soft | Reads assembled tower configs during Assembly Phase; passes roster to combat. 
| GDD exists | +| **Combat System** | Hard | `PlayerInventoryComponent.GetParticipantTowerSnapshot()` returns up to 4 rostered towers with their stats. Combat reads `TowerStatsData` for damage calculations. | GDD exists (`docs/CombatNodeArchitecture.md`) | +| **Progression** | Soft | May read aggregate tower assembly stats across runs. Pending Progression GDD. | Not yet designed | + +### Provisional Assumptions +- `TryDisassembleTower` method is not yet implemented — the design doc R3 specifies it should exist. Implementation is pending. +- Repair mechanism for 0-endurance components is out of scope for this GDD (see Open Questions). +- `CombatParticipantTowerValidationService` handles degraded tower detection — this is owned by the Combat System GDD. + +## Tuning Knobs + +| Knob | Default | Safe Range | Extreme: Too Low | Extreme: Too High | +|------|---------|-----------|-----------------|------------------| +| `TowerLevelCount` | 5 | 3–10 | Fewer upgrade tiers — less meaningful progression within a tower's lifetime | More tiers — stat arrays grow; UI scalability issues; balance complexity | +| `MaxParticipantTowerCount` | 4 | 2–6 | Too few towers — limited tactical variety in combat roster | Too many — combat UI cluttered; player decision paralysis | +| Per-level delta (`DRMuzzleComp.AttackDamagePerLevel`, etc.) 
| varies by row | varies | Too low → leveling feels pointless; stats converge | Too high → towers scale too steeply; late-game dominance | +| Rarity base arrays | varies by component row | varies | Too low → rarity feels meaningless | Too high → rarity gaps become massive | + +**Data-table-driven knobs** (not code constants): +- `DRMuzzleComp.AttackDamagePerLevel` — flat AttackDamage increase per tower level +- `DRBearingComp.RotateSpeedPerLevel` — rotation speed increase per level +- `DRBearingComp.AttackRangePerLevel` — range increase per level +- `DRBaseComp.AttackSpeedPerLevel` — attack speed change per level (can be negative) + +## Visual/Audio Requirements + +### VFX Event Specifications + +| Event | Visual Effect | Audio Cue | Duration | +|-------|--------------|-----------|----------| +| **Tower Assembled** | 6–10 geometric particles (triangles/diamonds) burst from assembly point, rarity-colored. Component icons collapse inward, tower card materializes with scale pulse (1.0x→1.15x→1.0x). Purple/Red rarity adds golden shimmer particles. | Ascending C5-E5-G5 arpeggio (200ms). Blue+ adds C6 for premium signal. | ~300ms | +| **Tower Added to Roster** | Roster slot glows rarity color (200ms). Tower icon animates to roster slot (200ms ease-out). Roster full: ring pulse from panel border. | Two-tone lock-in (G5→E5→G5, 80ms). Roster full adds C6. | ~250ms | +| **Tower Removed from Roster** | Slot fades from rarity color to empty (150ms). Tower icon animates back to inventory. Roster-full indicator: "-1" pulse above panel. | Descending G5→D5 (100ms) — "slot opened". | ~200ms | +| **Tower Degraded (0 Endurance)** | Red-orange geometric crack propagates across tower icon (300ms). Tower dims to 40% opacity, desaturated. Broken shard overlay icon. Roster slot flashes red-orange if affected. | Dissonant minor-2nd (C5→B4, 150ms) + descending power-down sweep (400Hz→200Hz, 200ms).
| ~400ms | +| **Tower Disassembled** | Tower icon explodes into 3 component icons flying to inventory positions. Particle burst at tower's former position. Components pulse on arrival. | Reverse arpeggio G5→E5→C5 (200ms). 3x short clicks (30ms each) as components snap back. | ~300ms | + +### Rarity Color Palette + +| Rarity | Hex | VFX Color | Audio Signal | +|--------|-----|-----------|--------------| +| White | `#E8E8E8` | White particles | 1-note chime | +| Green | `#4ADE80` | Green particles | 2-note chime | +| Blue | `#60A5FA` | Blue particles | 3-note chime | +| Purple | `#C084FC` | Purple + shimmer | 4-note + shimmer VFX | +| Red | `#F87171` | Red + shimmer | 5-note + shimmer VFX | + +### Animation & Style Constraints +- **Particle shapes**: Triangles, diamonds, hexagons ONLY — no circles, no organic curves +- **Waveforms**: Clean sine or triangle waves — digital-mathematical character +- **Easing**: All UI animations use ease-out entry. No bounce, no elastic overshoot +- **Duration budget**: Assembly/disassemble 200–300ms; roster add/remove 200–250ms; degraded 300–400ms. Max 400ms per transition +- **Accessibility**: All audio cues have visual alternatives (color flash, icon change, screen pulse). 
Degraded state uses geometric crack overlay, not color alone +- **No simultaneous full-screen effects**: VFX is localized to the relevant card/icon; full-screen flashes reserved only for degraded warning at ≤30% opacity + +### DataTable Extension +`DRTowerAssemblySound` (or extension of `DRSound`): + +| SoundId | AssetName | Volume | Notes | +|---------|-----------|--------|-------| +| TowerAssemble | Tower_Assemble | 0.8 | C5-E5-G5 arpeggio, 200ms | +| TowerAssemblePremium | Tower_Assemble_Premium | 0.8 | C5-E5-G5-C6, 250ms (Blue+) | +| TowerRosterAdd | Tower_Roster_Add | 0.6 | G5-E5-G5, 80ms | +| TowerRosterRemove | Tower_Roster_Remove | 0.5 | G5-D5, 100ms | +| TowerDegrade | Tower_Degrade | 0.7 | Minor-2nd + power-down sweep, 350ms | +| TowerDisassemble | Tower_Disassemble | 0.7 | G5-E5-C5 reverse arpeggio, 200ms | + +## UI Requirements + +### Assembly Phase Screen + +**Trigger**: Auto-displayed after any node resolves (per Node System). + +**Content**: +- **Inventory Grid**: All owned unassembled components, grouped by type (Muzzle, Bearing, Base) +- **Tower Slots**: 3 assembly slots (Muzzle, Bearing, Base) — drop targets for components +- **Assembled Towers Panel**: All towers built so far in this run +- **Combat Roster**: 4 slots showing currently rostered towers (ready for next combat) +- **Next Node Preview**: 2 outgoing edge destinations visible during Assembly Phase (from Node System) +- **Ready Button**: Confirms Assembly Phase is complete; triggers Node Choice + +**Interactions**: +- Drag components from inventory into assembly slots +- Click "Assemble" button when 3 slots are filled → creates tower +- Click tower → shows tower stats, rarity, tags; options to "Add to Roster" or "Disassemble" +- Drag tower from Assembled Towers to Roster slots +- Click "Disassemble" on tower → free disassemble, components return to inventory +- Click "Ready" → proceeds to Node Choice + +**Empty State**: +- No unassembled components: inventory grid shows "No components — 
combat drops will appear here" +- No assembled towers: Assembled Towers panel shows "Assemble towers from components above" +- Roster empty: slots show dotted outline placeholder + +**Tower Info Tooltip/Panel** (on tower click): +- Tower name and rarity (color-coded) +- Stats: AttackDamage (5 levels), RotateSpeed, AttackRange, AttackSpeed +- Tags with stack counts +- Component sources (Muzzle/Bearing/Base names) +- Endurance bars for each component (0–100%) + +### Roster Management UI + +**Roster Slots** (4 slots): +- Each slot shows: tower icon, rarity color border, tower name +- Drag tower to roster slot to add +- Click "X" on rostered tower to remove from roster +- Degraded tower (0 endurance): slot shows crack overlay, cannot be deployed +- Roster full (4/4): slots show "FULL" indicator; drag-and-drop returns tower to Assembled Towers + +### Accessibility +- All rarity colors are paired with distinct iconography +- Degraded state uses geometric crack shape, not color alone +- Tag stack counts shown numerically, not just visually +- Component endurance shown as percentage + bar +- All interactions possible via keyboard (tab navigation, enter to confirm) + +## Acceptance Criteria + +### Assembly +- **GIVEN** the player has 3 unassembled components (Muzzle, Bearing, Base), **WHEN** `TryAssembleTower(muzzleId, bearingId, baseId)` is called, **THEN** a new `TowerItemData` is created with aggregated stats, rarity is computed correctly, tags are merged, and all three components' `IsAssembledIntoTower` flags are set to `true`. +- **GIVEN** a Muzzle component is already assembled into a tower, **WHEN** the player attempts to use that component in a new `TryAssembleTower` call, **THEN** the call returns `false` and no tower is created. +- **GIVEN** any component's DR config row is missing, **WHEN** `TryAssembleTower` is called, **THEN** the call returns `false` and no tower is created.
+ +### Disassembling +- **GIVEN** an assembled tower is not in the combat roster, **WHEN** `TryDisassembleTower(towerInstanceId)` is called, **THEN** the tower is removed from `inventory.Towers`, all three components' `IsAssembledIntoTower` flags are set to `false`, and their `Endurance` values are preserved. +- **GIVEN** an assembled tower is currently in the combat roster, **WHEN** `TryDisassembleTower(towerInstanceId)` is called, **THEN** the tower is automatically removed from the roster before disassembling. + +### Roster Management +- **GIVEN** fewer than 4 towers are in the roster, **WHEN** `TryAddParticipantTower(towerInstanceId)` is called with a valid non-degraded tower, **THEN** the tower is added to `ParticipantTowerInstanceIds`. +- **GIVEN** 4 towers are already in the roster, **WHEN** `TryAddParticipantTower` is called with a valid tower, **THEN** the call returns `ParticipantTowerAssignFailureReason.ParticipantAreaFull` and no change occurs. +- **GIVEN** a tower has a component with `Endurance = 0`, **WHEN** `TryAddParticipantTower` is called, **THEN** the call returns `ParticipantTowerAssignResult` with `FailureReason = InvalidTower` and the tower is not added. + +### Stats and Formulas +- **GIVEN** a Green-rarity Muzzle with `AttackDamage = [10, 20, 30, 40, 50]` and `AttackDamagePerLevel = 3`, **WHEN** a tower is assembled from it, **THEN** the tower's `AttackDamage` array is `[20, 23, 26, 29, 32]`. +- **GIVEN** a tower is assembled from Muzzle=Green, Bearing=Blue, Base=Purple, **WHEN** rarity is computed, **THEN** the tower rarity is Blue (average of 2+3+4 = 3.0). +- **GIVEN** a tower is assembled from Muzzle=[Fire], Bearing=[Ice], Base=[Fire], **WHEN** tags are aggregated, **THEN** the tower has Fire with `TotalStack=2` and Ice with `TotalStack=1`. 
+ +### Endurance +- **GIVEN** a tower with all components at `Endurance > 0` is in the roster, **WHEN** combat ends and `ReduceTowerEndurance` is called, **THEN** all three components' endurance is reduced. +- **GIVEN** a component in an assembled tower reaches `Endurance = 0`, **WHEN** `TryAddParticipantTower` is called for that tower, **THEN** the call fails and the tower cannot be rostered. + +## Open Questions + +### 1. TryDisassembleTower Implementation Gap +**Status**: OPEN — The design doc (R3) specifies free disassembling, but `TryDisassembleTower` method does not exist in `PlayerInventoryComponent` or `PlayerInventoryTowerAssemblyService`. Implementation is needed. + +### 2. Auto-Cleanup of Degraded Rostered Towers +**Status**: OPEN — If a tower in the roster has a component reach 0 endurance mid-combat, the tower remains in `ParticipantTowerInstanceIds` but becomes non-functional. Should there be an automatic removal from roster when a tower becomes degraded? Currently no such mechanism exists. + +### 3. Repair Mechanism for 0-Endurance Components +**Status**: OUT OF SCOPE — Design doc R6 notes that 0-endurance components cannot be disassembled and must be repaired. Repair mechanism (e.g., gold cost to restore endurance) is out of scope for this GDD. A future Repair GDD should address this. + +### 4. Component Compatibility Rules +**Status**: RESOLVED — No affinity or compatibility rules are enforced. Any Muzzle+Bearing+Base combination is valid. `AttackMethodType` and `AttackPropertyType` are independent dimensions. + diff --git a/design/registry/entities.yaml b/design/registry/entities.yaml new file mode 100644 index 0000000..3192410 --- /dev/null +++ b/design/registry/entities.yaml @@ -0,0 +1,155 @@ +# Entity Registry +# Auto-generated from GDD design sessions. Do not edit manually. +# To update: run /design-system for each system, the registry is populated in Phase 5b. 
+ +entries: + # ─── Tower Assembly (design/gdd/tower-assembly.md) ─────────────────────────── + + - name: TowerLevelCount + type: constant + value: 5 + unit: levels + source: design/gdd/tower-assembly.md + description: Fixed number of tower level tiers. All tower stat arrays have exactly 5 elements. + + - name: MaxParticipantTowerCount + type: constant + value: 4 + unit: towers + source: design/gdd/tower-assembly.md + description: Maximum number of towers that can be rostered for combat. + + - name: TowerEnduranceRange + type: constant + value: "0–100" + unit: percent + source: design/gdd/tower-assembly.md + description: Valid range for component Endurance. 0 means non-functional. + + - name: TowerStatScaling + type: formula + variables: + - name: rarityBaseArray + symbol: B + type: "int[5] or float[5]" + description: Per-rarity base values from component stat array + - name: rarity + symbol: R + type: RarityType + description: Component rarity (White=1 to Red=5) + - name: perLevel + symbol: P + type: int or float + description: Per-level increment from data table row + output: + range: "varies by component data table" + description: "statValue[i] = B[Clamp(R-1,0,4)] + P*i, for i in 0..4" + source: design/gdd/tower-assembly.md + + - name: TowerRarityResolution + type: formula + variables: + - name: muzzleRarity + symbol: mR + type: RarityType + description: Muzzle component rarity (White=1 to Red=5) + - name: bearingRarity + symbol: bR + type: RarityType + description: Bearing component rarity (White=1 to Red=5) + - name: baseRarity + symbol: baseR + type: RarityType + description: Base component rarity (White=1 to Red=5) + output: + range: "White..Red" + description: "Clamp(Round((mR + bR + baseR) / 3), White, Red)" + source: design/gdd/tower-assembly.md + + - name: TagAggregation + type: formula + variables: + - name: muzzleTags + type: "TagType[]" + description: Tags from Muzzle component + - name: bearingTags + type: "TagType[]" + description: Tags from Bearing 
component + - name: baseTags + type: "TagType[]" + description: Tags from Base component + output: + range: "TagRuntimeData[]" + description: "Merged tags with TotalStack = count of components carrying each tag. Min TotalStack=1." + source: design/gdd/tower-assembly.md + + # ─── Node System (design/gdd/node-system.md) ──────────────────────────────── + + - name: BossBonusGold + type: constant + value: 200 + unit: gold + source: design/gdd/node-system.md + description: Flat bonus gold awarded for defeating the Boss node. + + - name: TotalNodesPerRun + type: constant + value: 10 + unit: nodes + source: design/gdd/node-system.md + description: Fixed number of nodes per run. Node 10 is always BossCombat. + + # ─── Shop System (design/gdd/shop.md) ──────────────────────────────── + + - name: MaxPlayerGold + type: constant + value: 9999 + unit: gold + source: design/gdd/shop.md + description: Hard cap on player gold. AddGold silently discards excess above this value. + + # ─── Event System (design/gdd/event-system.md) ──────────────────────── + + - name: EventSelectionSeedFormula + type: formula + variables: + - name: runSeed + symbol: S + type: int + description: Run seed from RunNodeExecutionContext + - name: sequenceIndex + symbol: I + type: int + description: Node sequence index in the run + - name: nodeId + symbol: N + type: int + description: Unique node identifier + output: + range: "int" + description: "seed = (((S * 31) + I) * 31) + N" + source: design/gdd/event-system.md + + - name: EventProbabilityRollSeedFormula + type: formula + variables: + - name: runSeed + symbol: S + type: int + description: Run seed + - name: sequenceIndex + symbol: I + type: int + description: Node sequence index + - name: eventId + symbol: E + type: int + description: Event identifier from DREvent + - name: optionIndex + symbol: O + type: int + description: Option index within the event + output: + range: "int" + description: "seed = S + I + E + O + 0 + 17" + source: 
design/gdd/event-system.md diff --git a/production/session-state/active.md b/production/session-state/active.md new file mode 100644 index 0000000..3d9b5e9 --- /dev/null +++ b/production/session-state/active.md @@ -0,0 +1,16 @@ +# Active Session State + +## Current Task +Cross-GDD Review — COMPLETE + +## Status +- **Task**: /review-all-gdds — COMPLETED +- **Report**: design/gdd/gdd-cross-review-2026-04-29-v2.md +- **Verdict**: CONCERNS (3 blocking issues, 6 warnings) +- **Flagged GDDs**: node-system.md (Needs Revision), progression.md (Needs Revision), tower-assembly.md (flagged medium) + +## Session Extract — /review-all-gdds 2026-04-29 +- Verdict: CONCERNS +- Blocking issues: 3 — (C1) "no partial rewards" vs Progression loss-path gold, (G1) Boss exponential HP vs linear player power hard wall, (G2/G3) Run End gold display undefined + loss-run RecordRunEnd undefined +- Warnings: 6 — gold unbounded accumulation, no dominant loop defined, Tag stacking dominant strategy, lossy economy (~50% sell), event rewards same tier as shop, Assembly Phase 5-panel cognitive overload +- Recommended next: /design-system retrofit node-system.md or /design-system retrofit progression.md (resolve blocking C1+G3 first)