diff --git a/.claude/skills/ci-prep/SKILL.md b/.claude/skills/ci-prep/SKILL.md index dd3b646..61d9cb3 100644 --- a/.claude/skills/ci-prep/SKILL.md +++ b/.claude/skills/ci-prep/SKILL.md @@ -3,7 +3,7 @@ name: ci-prep description: Prepares the current branch for CI by running the exact same steps locally and fixing issues. If CI is already failing, fetches the GH Actions logs first to diagnose. Use before pushing, when CI is red, or when the user says "fix ci". argument-hint: "[--failing] [optional job name to focus on]" --- - + # CI Prep @@ -42,12 +42,12 @@ Read **every line** of `--log-failed` output. For each failure note the exact fi ## Step 2 — Analyze the CI workflow -1. Find the CI workflow file. Look in `.github/workflows/` for `ci.yml`. +1. Find the CI workflow file. Look in `.github/workflows/` for `ci.yml` (this repo's CI workflow). 2. Read the workflow file completely. Parse every job and every step. -3. Extract the ordered list of commands the CI actually runs (e.g., `make fmt-check`, `make lint`, `make spellcheck`, `make test EXCLUDE_CI=true`, `make build`, `make package`). -4. Note any environment variables, matrix strategies, or conditional steps that affect execution. +3. Extract the ordered list of commands the CI actually runs. In this spec-compliant repo that is `make fmt CHECK=1 → make lint → make test → make build → make package` (REPO-STANDARDS-SPEC [MAKE-TARGETS] plus the repo-specific `package` target). +4. Note any environment variables, matrix strategies, or conditional steps that affect execution. The separate `website` job runs Playwright tests against the 11ty site under `website/`. -**Do NOT assume the steps.** Extract what the CI *actually does*. +**Do NOT assume the steps.** Extract what the CI *actually does*. If you find extra targets beyond the 7 in [MAKE-TARGETS] that are not already in the `Repo-Specific Targets` section, flag them — they should be consolidated by the agent-pmo skill. 
## Step 3 — Run each CI step locally, in order @@ -60,7 +60,7 @@ Work through failures in this priority order: For each command extracted from the CI workflow: -1. Run the command exactly as CI would run it. +1. Run the command exactly as CI would run it (adjusting only for local environment differences like not needing `actions/checkout`). 2. If the step fails, **stop and fix the issues** before continuing to the next step. 3. After fixing, re-run the same step to confirm it passes. 4. Move to the next step only after the current one succeeds. @@ -68,7 +68,7 @@ For each command extracted from the CI workflow: ### Hard constraints - **NEVER modify test files** — fix the source code, not the tests -- **NEVER add suppressions** (`// eslint-disable`, `// @ts-ignore`) +- **NEVER add suppressions** (`// eslint-disable`, `@ts-ignore`, `@ts-nocheck`) - **NEVER use `any` in TypeScript** to silence type errors - **NEVER delete or ignore failing tests** - **NEVER remove assertions** @@ -97,7 +97,7 @@ Once all CI steps pass locally: - Fix issues found in each step before moving to the next - Never skip steps or suppress errors - If the CI workflow has multiple jobs, run all of them (respecting dependency order) -- Skip steps that are CI-infrastructure-only (checkout, setup-node, cache steps, artifact uploads) — focus on the actual build/test/lint commands +- Skip steps that are CI-infrastructure-only (checkout, setup-node actions, cache steps, artifact uploads) — focus on the actual build/test/lint commands ## Success criteria diff --git a/.claude/skills/code-dedup/SKILL.md b/.claude/skills/code-dedup/SKILL.md index 4d5ddab..fc8e914 100644 --- a/.claude/skills/code-dedup/SKILL.md +++ b/.claude/skills/code-dedup/SKILL.md @@ -2,7 +2,7 @@ name: code-dedup description: Searches for duplicate code, duplicate tests, and dead code, then safely merges or removes them. Use when the user says "deduplicate", "find duplicates", "remove dead code", "DRY up", or "code dedup". 
Requires test coverage — refuses to touch untested code. --- - + # Code Dedup @@ -13,8 +13,8 @@ Carefully search for duplicate code, duplicate tests, and dead code across the r Before touching ANY code, verify these conditions. If any fail, stop and report why. 1. Run `make test` — all tests must pass. If tests fail, stop. Do not dedup a broken codebase. -2. Run `make coverage-check` — coverage must meet the repo's threshold. If it doesn't, stop. -3. Verify the project uses **static typing**. Check `tsconfig.json` has `"strict": true` — proceed. +2. `make test` is fail-fast AND enforces the coverage threshold from `coverage-thresholds.json`. If anything fails, stop and fix it before deduping. +3. This is a TypeScript repo. Confirm `tsconfig.json` has `"strict": true` (it does). Proceed. ## Steps @@ -34,8 +34,8 @@ Dedup Progress: Before deciding what to touch, understand what is tested. -1. Run `make test` and `make coverage-check` to confirm green baseline -2. Note the current coverage percentage — this is the floor. It must not drop. +1. Run `make test` to confirm green baseline. `make test` is fail-fast AND enforces the coverage thresholds from `coverage-thresholds.json` (REPO-STANDARDS-SPEC [TEST-RULES], [COVERAGE-THRESHOLDS-JSON]). It exits non-zero on any test failure OR coverage shortfall. +2. Note the current coverage percentages — these are the floor. They must not drop. 3. Identify which files/modules have coverage and which do not. Only files WITH coverage are candidates for dedup. ### Step 2 — Scan for dead code @@ -43,7 +43,10 @@ Before deciding what to touch, understand what is tested. Search for code that is never called, never imported, never referenced. 1. Look for unused exports, unused functions, unused classes, unused variables -2. Check for `noUnusedLocals`/`noUnusedParameters` in tsconfig, look for unexported functions with zero references +2. 
TypeScript-specific tooling: + - `tsconfig.json` has `noUnusedLocals`/`noUnusedParameters` — the compiler already warns on unused locals/params + - ESLint flags unused exports via `@typescript-eslint/no-unused-vars` + - Look for unexported functions with zero references within the module 3. For each candidate: **grep the entire codebase** for references (including tests, scripts, configs). Only mark as dead if truly zero references. 4. List all dead code found with file paths and line numbers. Do NOT delete yet. @@ -54,7 +57,7 @@ Search for code blocks that do the same thing in multiple places. 1. Look for functions/methods with identical or near-identical logic 2. Look for copy-pasted blocks (same structure, maybe different variable names) 3. Look for multiple implementations of the same algorithm or pattern -4. Check across module boundaries — duplicates often hide in different packages +4. Check across discovery providers in `src/discovery/` — duplicates often hide in similar providers 5. For each duplicate pair: note both locations, what they do, and how they differ (if at all) 6. List all duplicates found. Do NOT merge yet. @@ -63,8 +66,8 @@ Search for code blocks that do the same thing in multiple places. Search for tests that verify the same behavior. 1. Look for test functions with identical assertions against the same code paths -2. Look for test fixtures/helpers that are duplicated across test files -3. Look for integration tests that fully cover what a unit test also covers (keep the integration test, mark the unit test as redundant) +2. Look for test fixtures/helpers duplicated across test files +3. Look for integration tests that fully cover what a unit test also covers (keep the integration/E2E test, mark the unit test as redundant per CLAUDE.md rules) 4. List all duplicate tests found. Do NOT delete yet. ### Step 5 — Apply changes (one at a time) @@ -73,32 +76,33 @@ For each change, follow this cycle: **change → test → verify coverage → co #### 5a. 
Remove dead code - Delete dead code identified in Step 2 -- After each deletion: run `make test` and `make coverage-check` -- If tests fail or coverage drops: **revert immediately** and investigate +- After each deletion: run `make test` (fail-fast + coverage + threshold all in one) +- If `make test` exits non-zero: **revert immediately** and investigate +- Dead code removal should never break tests or drop coverage #### 5b. Merge duplicate code - For each duplicate pair: extract the shared logic into a single function/module - Update all call sites to use the shared version -- After each merge: run `make test` and `make coverage-check` -- If tests fail: **revert immediately** +- After each merge: run `make test` +- If tests fail: **revert immediately**. The duplicates may have subtle differences you missed. +- If coverage drops: the shared code must have equivalent test coverage. Add tests if needed before proceeding. #### 5c. Remove duplicate tests - Delete the redundant test (keep the more thorough one) -- After each deletion: run `make coverage-check` -- If coverage drops: **revert immediately** +- After each deletion: run `make test` +- If coverage drops below threshold, `make test` exits non-zero — **revert immediately**. The "duplicate" test was covering something the other wasn't. ### Step 6 — Final verification -1. Run `make test` — all tests must still pass -2. Run `make coverage-check` — coverage must be >= the baseline from Step 1 -3. Run `make lint` and `make fmt-check` — code must be clean -4. Report: what was removed, what was merged, final coverage vs baseline +1. Run `make lint` — ESLint + cspell must pass +2. Run `make test` — tests must pass AND coverage must remain ≥ the baseline from Step 1 +3. Report: what was removed, what was merged, final coverage vs baseline ## Rules -- **No test coverage = do not touch.** If a file has no tests covering it, leave it alone entirely. -- **Coverage must not drop.** The coverage floor from Step 1 is sacred. 
-- **One change at a time.** Make one dedup change, run tests, verify coverage. Never batch. -- **When in doubt, leave it.** If two code blocks look similar but you're not 100% sure they're functionally identical, leave both. -- **Preserve public API surface.** Do not change function signatures, class names, or module exports that external code depends on. -- **Three similar lines is fine.** Only dedup when the shared logic is substantial (>10 lines) or when there are 3+ copies. +- **No test coverage = do not touch.** If a file has no tests covering it, leave it alone entirely. You cannot safely dedup what you cannot verify. +- **Coverage must not drop.** If removing or merging code causes coverage to decrease, revert and investigate. The coverage floor from Step 1 is sacred. +- **One change at a time.** Make one dedup change, run tests, verify coverage. Never batch multiple dedup changes before testing. +- **When in doubt, leave it.** If two code blocks look similar but you're not 100% sure they're functionally identical, leave both. False dedup is worse than duplication. +- **Preserve public API surface.** Do not change exported function signatures, class names, or module exports. Internal refactoring only. +- **Three similar lines is fine.** Do not create abstractions for trivial duplication. Only dedup when the shared logic is substantial (>10 lines) or when there are 3+ copies. diff --git a/.claude/skills/spec-check/SKILL.md b/.claude/skills/spec-check/SKILL.md index 5d4aedc..e3b54c1 100644 --- a/.claude/skills/spec-check/SKILL.md +++ b/.claude/skills/spec-check/SKILL.md @@ -3,7 +3,7 @@ name: spec-check description: Audit spec/plan documents against the codebase. Ensures every spec section has implementing code, tests, and matching logic. Use when the user says "check specs", "spec audit", or "verify specs". 
argument-hint: "[optional spec ID or filename filter]" --- - + # spec-check @@ -69,6 +69,21 @@ Use Glob to find candidate files, then use Grep to confirm they contain spec IDs Spec IDs are **hierarchical descriptive slugs, NEVER numbered.** The format is `[GROUP-TOPIC]` or `[GROUP-TOPIC-DETAIL]`. The first word is the **group** — all sections sharing the same group MUST appear together in the spec's table of contents. IDs are uppercase, hyphen-separated, unique across the repo, and MUST NOT contain sequential numbers. +The hierarchy depth varies by repo: two words for simple repos (`[AUTH-LOGIN]`), three for most (`[AUTH-TOKEN-VERIFY]`), four for complex domains (`[AUTH-OAUTH-REFRESH-FLOW]`). The hierarchy mirrors the spec document's heading structure. + +Examples of valid spec IDs (note how groups cluster): +- `[AUTH-LOGIN]`, `[AUTH-TOKEN-VERIFY]`, `[AUTH-TOKEN-REFRESH]` — all in the AUTH group +- `[CI-TIMEOUT]`, `[CI-LINT]`, `[CI-RELEASE]` — all in the CI group +- `[LINT-ESLINT]`, `[LINT-RUFF]` — all in the LINT group +- `[FEAT-DARK-MODE]`, `[FEAT-SEARCH-FILTER]` — all in the FEAT group + +Examples of INVALID spec IDs: +- `[SPEC-001]` — numbered, meaningless +- `[FEAT-AUTH-01]` — trailing number +- `[REQ-003]` — sequential index, no group hierarchy +- `[CI-004]` — numbered, tells the reader nothing +- `[TIMEOUT]` — no group prefix, ungrouped + For each file, extract every spec ID and its associated section title (the heading text after the ID) and the full section content (everything until the next heading of equal or higher level). --- @@ -101,6 +116,34 @@ Search the entire codebase for the spec ID string, **excluding** these directori Use Grep with the literal spec ID (e.g., `[AUTH-TOKEN-VERIFY]`) to find references in code files. +Code files should contain comments referencing the spec ID. 
The search must catch **all** comment styles across languages: + +**C-style `//` comments** (JavaScript, TypeScript, Rust, C#, F#, Java, Kotlin, Go, Swift, Dart): +- `// Implements [AUTH-TOKEN-VERIFY]` +- `// [AUTH-TOKEN-VERIFY]` +- `// Tests [AUTH-TOKEN-VERIFY]` (also counts as a code reference) +- `/// Implements [AUTH-TOKEN-VERIFY]` (doc comments) + +**Hash `#` comments** (Python, Ruby, Shell/Bash, YAML, TOML): +- `# Implements [AUTH-TOKEN-VERIFY]` +- `# [AUTH-TOKEN-VERIFY]` +- `# Tests [AUTH-TOKEN-VERIFY]` + +**HTML/XML comments** (HTML, CSS, SVG, XML, XAML, JSX templates): +- `<!-- Implements [AUTH-TOKEN-VERIFY] -->` +- `<!-- [AUTH-TOKEN-VERIFY] -->` + +**ML-style comments** (F#, OCaml): +- `(* Implements [AUTH-TOKEN-VERIFY] *)` + +**Lua comments:** +- `-- Implements [AUTH-TOKEN-VERIFY]` + +**CSS comments:** +- `/* Implements [AUTH-TOKEN-VERIFY] */` + +**The key rule:** any comment in any language containing the exact spec ID string (e.g., `[AUTH-TOKEN-VERIFY]`) counts as a valid code reference. The Grep search uses the literal spec ID string, so it naturally matches all comment styles. Do NOT restrict the search to specific comment prefixes — just search for the spec ID string itself. + **If NO code files reference the spec ID:** ``` @@ -118,12 +161,72 @@ this spec section, then re-run spec-check. #### Check B: Tests reference the spec ID Search test files for the spec ID. Test files are found in: -- `src/test/` +- `test/` +- `tests/` - `**/*.test.*` - `**/*.spec.*` +- `**/*_test.*` +- `**/test_*.*` +- `**/*Tests.*` +- `**/*Test.*` Use Grep to search these locations for the literal spec ID string. +Tests should contain the spec ID in comments, test names, or annotations. 
The search must catch **all** test frameworks across languages: + +**JavaScript/TypeScript** (Jest, Mocha, Vitest, Playwright): +- `// Tests [AUTH-TOKEN-VERIFY]` +- `describe('[AUTH-TOKEN-VERIFY] Authentication flow', () => ...)` +- `test('[AUTH-TOKEN-VERIFY] should verify token', () => ...)` +- `it('[AUTH-TOKEN-VERIFY] verifies token', () => ...)` + +**Python** (pytest, unittest): +- `# Tests [AUTH-TOKEN-VERIFY]` +- `def test_auth_token_verify_flow():` +- `class TestAuthTokenVerify:` + +**Rust:** +- `// Tests [AUTH-TOKEN-VERIFY]` +- `#[test] // Tests [AUTH-TOKEN-VERIFY]` + +**C#** (xUnit, NUnit, MSTest): +- `// Tests [AUTH-TOKEN-VERIFY]` +- `[Fact] // Tests [AUTH-TOKEN-VERIFY]` +- `[Test] // Tests [AUTH-TOKEN-VERIFY]` +- `[TestMethod] // Tests [AUTH-TOKEN-VERIFY]` + +**F#** (xUnit, Expecto): +- `// Tests [AUTH-TOKEN-VERIFY]` +- `[<Fact>] // Tests [AUTH-TOKEN-VERIFY]` +- `testCase "[AUTH-TOKEN-VERIFY] description" <| fun () ->` + +**Java/Kotlin** (JUnit, TestNG): +- `// Tests [AUTH-TOKEN-VERIFY]` +- `@Test // Tests [AUTH-TOKEN-VERIFY]` + +**Go:** +- `// Tests [AUTH-TOKEN-VERIFY]` +- `func TestAuthTokenVerify(t *testing.T) { // Tests [AUTH-TOKEN-VERIFY]` + +**Swift** (XCTest): +- `// Tests [AUTH-TOKEN-VERIFY]` +- `func testAuthTokenVerify() { // Tests [AUTH-TOKEN-VERIFY]` + +**Dart** (flutter_test): +- `// Tests [AUTH-TOKEN-VERIFY]` +- `test('[AUTH-TOKEN-VERIFY] description', () { ... });` + +**Ruby** (RSpec, Minitest): +- `# Tests [AUTH-TOKEN-VERIFY]` +- `describe '[AUTH-TOKEN-VERIFY] Authentication' do` +- `it '[AUTH-TOKEN-VERIFY] verifies token' do` + +**Shell** (bats, shunit2): +- `# Tests [AUTH-TOKEN-VERIFY]` +- `@test "[AUTH-TOKEN-VERIFY] description" {` + +**The key rule:** same as Check A — search for the literal spec ID string in test files. Any occurrence of the exact spec ID in a test file counts. Do NOT restrict to specific patterns — just search for the spec ID string itself. 
+ **If NO test files reference the spec ID:** ``` @@ -153,7 +256,26 @@ This is the most critical check. You must: - **Missing steps** — If the spec describes 5 steps but code only implements 3, that's a violation. - **Wrong defaults** — If the spec says "default to X" but code defaults to Y, that's a violation. -4. **If the code deviates from the spec**, report a detailed error with spec quotes and code references. +4. **If the code deviates from the spec**, report a detailed error: + +``` +SPEC VIOLATION: [AUTH-TOKEN-VERIFY] Code does not match spec. + +SPEC SAYS: +> "The authentication flow must verify the token expiry before checking permissions" +> (from docs/specs/AUTH-SPEC.md, line 42) + +CODE DOES: +> `if (hasPermission(user)) { verifyToken(token); }` (src/auth.ts:42) + +DEVIATION: The code checks permissions BEFORE verifying token expiry. +The spec explicitly requires token expiry verification FIRST. + +ACTION REQUIRED: Reorder the logic in src/auth.ts to verify token expiry +before checking permissions, as specified in [AUTH-TOKEN-VERIFY]. +``` + +**STOP HERE. Do not continue to other specs.** 5. **If the code matches the spec**, this check passes. Move to the next spec. @@ -180,6 +302,8 @@ spec-check PASSED. All specs verified. | Spec ID | Title | Code References | Test References | Logic Match | |----------------|--------------------------|-----------------|-----------------|-------------| | [AUTH-TOKEN-VERIFY] | Authentication flow | src/auth.ts | tests/auth.test.ts | PASS | +| [RATE-LIMIT-CONFIG] | Rate limiting | src/rate.ts | tests/rate.test.ts | PASS | +| ... | ... | ... | ... | ... | Checked N spec sections across M files. All have implementing code, tests, and matching logic. ``` @@ -199,7 +323,7 @@ Checked N spec sections across M files. All have implementing code, tests, and m - **Fail fast.** Stop on the first violation. One fix at a time. - **Be pedantic.** If the spec says it, the code must do it. No "close enough". 
-- **Quote everything.** Always quote the spec text and the code in error messages. +- **Quote everything.** Always quote the spec text and the code in error messages so the developer sees exactly what's wrong. - **Be actionable.** Every error must tell the developer what file to change and what to do. -- **Exclude docs from code search.** Markdown files are documentation, not implementation. -- **No numbered IDs.** Spec IDs are hierarchical descriptive slugs, NEVER sequential numbers. +- **Exclude docs from code search.** Markdown files are documentation, not implementation. Only search actual code files for spec references. +- **No numbered IDs.** Spec IDs are hierarchical descriptive slugs (`[AUTH-TOKEN-VERIFY]`), NEVER sequential numbers (`[SPEC-001]`). The first word is the group — sections sharing a group must be adjacent in the TOC. If you encounter numbered or ungrouped IDs, flag them as a violation. diff --git a/.claude/skills/submit-pr/SKILL.md b/.claude/skills/submit-pr/SKILL.md index cf7f14b..0263a52 100644 --- a/.claude/skills/submit-pr/SKILL.md +++ b/.claude/skills/submit-pr/SKILL.md @@ -2,9 +2,8 @@ name: submit-pr description: Creates a pull request with a well-structured description after verifying CI passes. Use when the user asks to submit, create, or open a pull request. disable-model-invocation: true -allowed-tools: Bash(git *), Bash(gh *) --- - + # Submit PR @@ -12,10 +11,12 @@ Create a pull request for the current branch with a well-structured description. ## Steps +*NOTE: if you already ran make ci in this session and it passed, you can skip step 1.* + 1. Run `make ci` — must pass completely before creating PR -2. **Generate the diff against main.** Run `git diff main...HEAD > /tmp/pr-diff.txt` to capture the full diff between the current branch and the head of main. This is the ONLY source of truth for what the PR contains. **Warning:** the diff can be very large. 
If the diff file exceeds context limits, process it in chunks rather than trying to load it all at once. +2. **Generate the diff against main.** Run `git diff main...HEAD > /tmp/pr-diff.txt` to capture the full diff between the current branch and the head of main. This is the ONLY source of truth for what the PR contains. **Warning:** the diff can be very large. If the diff file exceeds context limits, process it in chunks (e.g., read sections with `head`/`tail` or split by file) rather than trying to load it all at once. 3. **Derive the PR title and description SOLELY from the diff.** Read the diff output and summarize what changed. Ignore commit messages, branch names, and any other metadata — only the actual code/content diff matters. -4. Write PR body using the template in `.github/pull_request_template.md` +4. Write PR body using the template in `.github/PULL_REQUEST_TEMPLATE.md` 5. Fill in (based on the diff analysis from step 3): - TLDR: one sentence - What Was Added: new files, features, deps @@ -30,7 +31,6 @@ Create a pull request for the current branch with a well-structured description. - Never create a PR if `make ci` fails - PR description must be specific and tight — no vague placeholders - Link to the relevant GitHub issue if one exists -- DO NOT include yourself as a coauthor! ## Success criteria diff --git a/.claude/skills/upgrade-packages/SKILL.md b/.claude/skills/upgrade-packages/SKILL.md index 2fc3294..11826ed 100644 --- a/.claude/skills/upgrade-packages/SKILL.md +++ b/.claude/skills/upgrade-packages/SKILL.md @@ -1,9 +1,9 @@ --- name: upgrade-packages -description: Upgrade all dependencies/packages to their latest versions. Use when the user says "upgrade packages", "update dependencies", "bump versions", "update packages", or "upgrade deps". +description: Upgrade all dependencies/packages to their latest versions for the detected language(s). 
Use when the user says "upgrade packages", "update dependencies", "bump versions", "update packages", or "upgrade deps". argument-hint: "[--check-only] [--major] [package-name]" --- - + # Upgrade Packages @@ -15,61 +15,241 @@ Upgrade all project dependencies to their latest compatible (or latest major, if - `--major` — Include major version bumps (breaking changes). Without this flag, stay within semver-compatible ranges. - Any other argument is treated as a specific package name to upgrade (instead of all packages). -## Step 1 — Detect package manager +## Step 1 — Detect language and package manager -This is a TypeScript/Node.js project using npm (`package-lock.json`). +Inspect the repo root and subdirectories for manifest files. Identify ALL that apply: + +| Manifest file | Language | Package manager | +|---|---|---| +| `Cargo.toml` | Rust | cargo | +| `package.json` | Node.js / TypeScript | npm / yarn / pnpm (check lockfile) | +| `pyproject.toml` | Python | pip / uv / poetry (check `[build-system]` or `[tool.poetry]`) | +| `requirements.txt` | Python | pip | +| `setup.py` / `setup.cfg` | Python | pip | +| `pubspec.yaml` | Dart / Flutter | pub | +| `*.csproj` / `*.fsproj` / `*.sln` | C# / F# | NuGet (dotnet) | +| `Directory.Build.props` | C# / F# | NuGet (dotnet) | +| `go.mod` | Go | go modules | +| `Gemfile` | Ruby | bundler | +| `composer.json` | PHP | composer | +| `build.gradle` / `build.gradle.kts` | Java / Kotlin | gradle | +| `pom.xml` | Java | maven | + +If multiple languages are present, process each one in order. + +**If you cannot detect any manifest file, stop and tell the user.** ## Step 2 — List outdated packages +Run the appropriate command to list what's outdated BEFORE upgrading anything. Show the user what will change. 
+ +### Rust +```bash +cargo outdated # install: cargo install cargo-outdated +cargo update --dry-run +``` +**Read the docs:** https://doc.rust-lang.org/cargo/commands/cargo-update.html + +### Node.js (npm) ```bash npm outdated ``` +If using yarn: `yarn outdated`. If using pnpm: `pnpm outdated`. + +**Read the docs:** +- npm: https://docs.npmjs.com/cli/v10/commands/npm-update +- yarn: https://yarnpkg.com/cli/up +- pnpm: https://pnpm.io/cli/update + +### Python (pip) +```bash +pip list --outdated +``` +If using uv: `uv pip list --outdated`. If using poetry: `poetry show --outdated`. + +**Read the docs:** +- pip: https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-U +- uv: https://docs.astral.sh/uv/reference/cli/#uv-pip-install +- poetry: https://python-poetry.org/docs/cli/#update + +### Dart / Flutter +```bash +dart pub outdated +# or for Flutter projects: +flutter pub outdated +``` +**Read the docs:** https://dart.dev/tools/pub/cmd/pub-outdated + +### C# / F# (NuGet) +```bash +dotnet list package --outdated +``` +For transitive dependencies too: `dotnet list package --outdated --include-transitive` -**Read the docs:** https://docs.npmjs.com/cli/v10/commands/npm-update +**Read the docs:** https://learn.microsoft.com/en-us/dotnet/core/tools/dotnet-list-package + +### Go +```bash +go list -m -u all +``` +**Read the docs:** https://go.dev/ref/mod#go-get + +### Ruby (Bundler) +```bash +bundle outdated +``` +**Read the docs:** https://bundler.io/man/bundle-update.1.html + +### PHP (Composer) +```bash +composer outdated +``` +**Read the docs:** https://getcomposer.org/doc/03-cli.md#update-u-upgrade + +### Java / Kotlin (Gradle) +```bash +./gradlew dependencyUpdates # requires ben-manes/gradle-versions-plugin +``` +**Read the docs:** https://docs.gradle.org/current/userguide/dependency_management.html + +### Java (Maven) +```bash +mvn versions:display-dependency-updates +``` +**Read the docs:** 
https://www.mojohaus.org/versions/versions-maven-plugin/display-dependency-updates-mojo.html If `--check-only` was passed, **stop here** and report the outdated list. ## Step 3 — Read the official upgrade docs -**Before running any upgrade command, fetch and read the official documentation URL above.** Use WebFetch. Do not guess at flags. +**Before running any upgrade command, you MUST fetch and read the official documentation URL listed above for the detected package manager.** Use WebFetch to retrieve the page. This ensures you use the correct flags and understand the behavior. Do not guess at flags or options from memory. ## Step 4 — Upgrade packages +Run the upgrade. If a specific package name was given as an argument, upgrade only that package. + +### Rust +```bash +cargo update # semver-compatible updates +# --major flag: +cargo update --breaking # major version bumps (cargo 1.84+) +``` +For workspace members, run from workspace root. + +### Node.js (npm) +```bash +npm update # semver-compatible (within package.json ranges) +# --major flag: +npx npm-check-updates -u && npm install # bump package.json to latest majors +``` +If using yarn: `yarn up` / `yarn up -R '**'`. If using pnpm: `pnpm update` / `pnpm update --latest`. + +### Python (pip) +For `requirements.txt`: +```bash +pip install --upgrade -r requirements.txt +pip freeze > requirements.txt # pin new versions +``` +For `pyproject.toml` with pip: update version specifiers manually, then `pip install -e ".[dev]"`. +For uv: `uv pip install --upgrade -r requirements.txt` or `uv lock --upgrade`. +For poetry: `poetry update` / `poetry update --latest` (with `--major` flag). + +### Dart / Flutter ```bash -npm update # semver-compatible +dart pub upgrade # semver-compatible # --major flag: -npx npm-check-updates -u && npm install # bump to latest majors +dart pub upgrade --major-versions # bump to latest majors +``` +For Flutter: replace `dart` with `flutter`. 
+ +### C# / F# (NuGet) +There is NO single `dotnet upgrade-all` command. You must upgrade each package individually: +```bash +# For each outdated package from Step 2: +dotnet add package <PACKAGE_NAME> # upgrades to latest +# Or with specific version: +dotnet add package <PACKAGE_NAME> --version <VERSION> +``` +For `Directory.Build.props`, edit the version numbers directly in the XML. + +**Read the docs:** https://learn.microsoft.com/en-us/dotnet/core/tools/dotnet-add-package + +Alternatively, use the dotnet-outdated global tool: +```bash +dotnet tool install --global dotnet-outdated-tool +dotnet outdated --upgrade +``` +**Read the docs:** https://github.com/dotnet-outdated/dotnet-outdated + +### Go +```bash +go get -u ./... # update all dependencies +go mod tidy # clean up go.sum +``` +For a specific package: `go get -u <module-path>@latest`. + +### Ruby (Bundler) +```bash +bundle update # all gems +# specific gem: +bundle update <gem-name> +``` + +### PHP (Composer) +```bash +composer update # all packages +# specific package: +composer update <vendor/package> +``` +With `--major`: edit `composer.json` version constraints first, then `composer update`. + +### Java / Kotlin (Gradle) +Edit version numbers in `build.gradle` / `build.gradle.kts` / version catalogs (`libs.versions.toml`), then: +```bash +./gradlew dependencies # verify resolution +``` + +### Java (Maven) +```bash +mvn versions:use-latest-releases # update pom.xml to latest releases +mvn versions:commit # remove backup pom ``` ## Step 5 — Verify the upgrade +After upgrading, run the project's build and test suite to confirm nothing broke: + ```bash make ci ``` +If `make ci` is not available, run whatever build/test commands the project uses (check the Makefile, CI workflow, or CLAUDE.md). + If tests fail: 1. Read the failure output carefully -2. Check the changelog / migration guide for the upgraded packages +2. Check the changelog / migration guide for the upgraded packages (fetch the release notes URL if available) 3. Fix breaking changes in the code 4. Re-run tests -5. 
If stuck after 3 attempts on the same failure, report it to the user +5. If stuck after 3 attempts on the same failure, report it to the user with the error details and the package that caused it ## Step 6 — Report +Provide a summary: + - Packages upgraded (old version -> new version) -- Packages skipped (and why) +- Packages skipped (and why, e.g., major version bump without `--major` flag) - Build/test result after upgrade - Any breaking changes that were fixed -- Any packages that could not be upgraded +- Any packages that could not be upgraded (with error details) ## Rules - **Always list outdated packages first** before upgrading anything -- **Always read the official docs** before running upgrade commands +- **Always read the official docs** for the package manager before running upgrade commands - **Always run tests after upgrading** to catch breakage immediately -- **Never remove packages** unless explicitly deprecated and replaced +- **Never remove packages** unless they were explicitly deprecated and replaced - **Never downgrade packages** unless rolling back a broken upgrade -- **Never modify lockfiles manually** — let npm regenerate them +- **Never modify lockfiles manually** (package-lock.json, yarn.lock, Cargo.lock, etc.) 
— let the package manager regenerate them - **Commit nothing** — leave changes in the working tree for the user to review ## Success criteria diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1952241..b0576ec 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,48 +1,68 @@ +# agent-pmo:424c8f8 name: CI on: pull_request: branches: [main] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: ci: + name: CI runs-on: ubuntu-latest + timeout-minutes: 10 steps: - uses: actions/checkout@v4 - uses: actions/setup-node@v4 with: - node-version: 20 - cache: npm + node-version: '20' + cache: 'npm' - run: npm ci - - name: Format - run: make format + - name: Format check + run: make fmt CHECK=1 - name: Lint run: make lint - - name: Spell check - run: make spellcheck + # make test = fail-fast + coverage + threshold enforcement ([TEST-RULES]). + # Thresholds live in coverage-thresholds.json ([COVERAGE-THRESHOLDS-JSON]). 
+ - name: Test + run: make test-exclude-ci + + - name: Upload coverage + uses: actions/upload-artifact@v4 + if: always() + with: + name: coverage-report + path: coverage/ + retention-days: 7 - name: Build run: make build - - name: Test - run: make test EXCLUDE_CI=true - - name: Package run: make package + website: + name: Website tests runs-on: ubuntu-latest + # TIMEOUT EXCEPTION: Playwright install + browser tests commonly exceed 10 min + timeout-minutes: 20 steps: - uses: actions/checkout@v4 - uses: actions/setup-node@v4 with: - node-version: 20 + node-version: '20' + cache: 'npm' + cache-dependency-path: website/package-lock.json - name: Install dependencies run: npm ci diff --git a/.vscode-test.mjs b/.vscode-test.mjs index e3a589c..86d39be 100644 --- a/.vscode-test.mjs +++ b/.vscode-test.mjs @@ -27,7 +27,6 @@ export default defineConfig({ slow: 10000 }, launchArgs: [ - '--disable-gpu', '--user-data-dir', userDataDir ] }], diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..3983fa0 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,8 @@ +{ + "_agent_pmo": "424c8f8", + "recommendations": [ + "nimblesite.commandtree", + "nimblesite.too-many-cooks", + "nimblesite.typeDiagram" + ] +} diff --git a/Agents.md b/Agents.md index 2bc847e..e186551 100644 --- a/Agents.md +++ b/Agents.md @@ -1,4 +1,2 @@ - -# Agents - -See [CLAUDE.md](CLAUDE.md) for all project instructions, coding rules, testing guidelines, and command reference. + +@CLAUDE.md diff --git a/Claude.md b/Claude.md index 4ea22e9..b70a591 100644 --- a/Claude.md +++ b/Claude.md @@ -1,8 +1,10 @@ - + # CommandTree — Agent Instructions ⚠️ CRITICAL: **Reduce token usage.** Check file size before loading. Write less. Delete fluff and dead code. Alert user when context is loaded with pointless files. ⚠️ +⚠️ ASKING THE USER IF THEY WANT TO PROCEED, OR TO CLARIFY IS ⛔️ ILLEGAL. JUST DO IT!! ⚠️ + > Read this entire file before writing any code. 
> These rules are NON-NEGOTIABLE. Violations will be rejected in review. @@ -133,22 +135,22 @@ A "fake test" is any test that passes without actually verifying behavior: ## Build Commands (cross-platform via GNU Make) +Seven standard targets — see REPO-STANDARDS-SPEC [MAKE-TARGETS]: + ```bash -make build # compile everything -make test # run tests with coverage -make lint # run all linters -make fmt # format all code -make fmt-check # check formatting (CI uses this) -make clean # remove build artifacts -make check # lint + test (pre-commit) -make ci # fmt-check + lint + spellcheck + test + build + package -make coverage # generate and open coverage report -make coverage-check # assert coverage thresholds -make spellcheck # run cspell spell checker -make package # build VSIX package -make setup # post-create dev environment setup +make build # compile TypeScript +make test # FAIL-FAST tests + coverage + threshold enforcement (the only test entry point) +make lint # ESLint + cspell (read-only, no formatting) +make fmt # Prettier format in-place; `make fmt CHECK=1` for read-only check +make clean # remove build artifacts +make ci # lint + test + build +make setup # post-create dev environment setup ``` +Repo-specific: `make package` builds the VSIX. + +Coverage thresholds live in `coverage-thresholds.json` (`default_threshold`) — the single source of truth per [COVERAGE-THRESHOLDS-JSON]. Never set thresholds via env vars, GitHub repo variables, or CI YAML. Ratchet only — never lower. 
+ ## Critical Docs ### VS Code SDK diff --git a/Makefile b/Makefile index 622e204..abe3f88 100644 --- a/Makefile +++ b/Makefile @@ -1,14 +1,14 @@ -# agent-pmo:5547fd2 +# agent-pmo:424c8f8 # ============================================================================= # Standard Makefile — CommandTree # Cross-platform: Linux, macOS, Windows (via GNU Make) # ============================================================================= -.PHONY: build test lint fmt fmt-check format clean check ci coverage coverage-check setup spellcheck package +.PHONY: build test lint fmt clean ci setup package test-exclude-ci help -# ----------------------------------------------------------------------------- +# --------------------------------------------------------------------------- # OS Detection -# ----------------------------------------------------------------------------- +# --------------------------------------------------------------------------- ifeq ($(OS),Windows_NT) SHELL := powershell.exe .SHELLFLAGS := -NoProfile -Command @@ -20,142 +20,94 @@ else MKDIR = mkdir -p endif -# Coverage threshold (override in CI via env var) -COVERAGE_THRESHOLD ?= 80 +# --------------------------------------------------------------------------- +# Coverage — single source of truth is coverage-thresholds.json +# See REPO-STANDARDS-SPEC [COVERAGE-THRESHOLDS-JSON]. 
+# --------------------------------------------------------------------------- +COVERAGE_THRESHOLDS_FILE := coverage-thresholds.json + +UNAME := $(shell uname 2>/dev/null) +VSCODE_TEST_CMD = npx vscode-test --coverage +VSCODE_TEST_EXCLUDE_CMD = npx vscode-test --coverage --grep @exclude-ci --invert +ifeq ($(UNAME),Linux) +VSCODE_TEST = xvfb-run -a $(VSCODE_TEST_CMD) +VSCODE_TEST_EXCLUDE = xvfb-run -a $(VSCODE_TEST_EXCLUDE_CMD) +else +VSCODE_TEST = $(VSCODE_TEST_CMD) +VSCODE_TEST_EXCLUDE = $(VSCODE_TEST_EXCLUDE_CMD) +endif # ============================================================================= -# PRIMARY TARGETS (uniform interface — do not rename) +# Standard Targets (exactly 7 — see REPO-STANDARDS-SPEC [MAKE-TARGETS]) # ============================================================================= ## build: Compile/assemble all artifacts build: @echo "==> Building..." - $(MAKE) _build + npx tsc -p ./ -## test: Run full test suite with coverage -test: - @echo "==> Testing..." - $(MAKE) _test +## test: Fail-fast tests + coverage + threshold enforcement ([TEST-RULES]). +test: build + @echo "==> Testing (fail-fast + coverage + threshold)..." + npm run test:unit + $(VSCODE_TEST) + $(MAKE) _coverage_check -## lint: Run all linters (fails on any warning) +## lint: Run all linters/analyzers (read-only). Does NOT format. lint: @echo "==> Linting..." - $(MAKE) _lint + npx eslint src + npx cspell "src/**/*.ts" -## fmt: Format all code in-place +## fmt: Format all code in-place. Pass CHECK=1 for read-only check mode. fmt: - @echo "==> Formatting..." - $(MAKE) _fmt - -## fmt-check: Check formatting without modifying -fmt-check: - @echo "==> Checking format..." - $(MAKE) _fmt_check + @echo "==> Formatting$(if $(CHECK), (check mode),)..." + npx prettier $(if $(CHECK),--check,--write) "src/**/*.ts" ## clean: Remove all build artifacts clean: @echo "==> Cleaning..." 
- $(MAKE) _clean - -## check: lint + test (pre-commit) -check: lint test + $(RM) out coverage .vscode-test ## ci: lint + test + build (full CI simulation) -ci: fmt-check lint spellcheck test build package - -## coverage: Generate coverage report -coverage: - @echo "==> Coverage report..." - $(MAKE) _coverage - -## coverage-check: Assert thresholds (exits non-zero if below) -coverage-check: - @echo "==> Checking coverage thresholds..." - $(MAKE) _coverage_check +ci: lint test build -## setup: Post-create dev environment setup +## setup: Post-create dev environment setup (devcontainer hook) setup: @echo "==> Setting up development environment..." - $(MAKE) _setup + npm ci @echo "==> Setup complete. Run 'make ci' to validate." +# Private recipe — called from `test`. Do not expose as a public target. +_coverage_check: + node tools/check-coverage.mjs + # ============================================================================= -# CUSTOM TARGETS (project-specific) +# Repo-Specific Targets # ============================================================================= -## format: Alias for fmt (backwards compatibility) -format: fmt - -## spellcheck: Run cspell spell checker -spellcheck: - npx cspell "src/**/*.ts" - ## package: Build VSIX package package: build npx vsce package -# ============================================================================= -# TYPESCRIPT/NODE IMPLEMENTATION -# ============================================================================= - -UNAME := $(shell uname 2>/dev/null) -EXCLUDE_CI ?= false - -VSCODE_TEST_CMD = npx vscode-test --coverage -ifeq ($(EXCLUDE_CI),true) -VSCODE_TEST_CMD += --grep @exclude-ci --invert -endif - -ifeq ($(UNAME),Linux) -VSCODE_TEST = xvfb-run -a $(VSCODE_TEST_CMD) -else -VSCODE_TEST = $(VSCODE_TEST_CMD) -endif - -_build: - npx tsc -p ./ - -_test: _build +## test-exclude-ci: Run tests EXCLUDING those tagged @exclude-ci (fail-fast + coverage + threshold) +test-exclude-ci: build + @echo "==> Testing (excluding 
@exclude-ci, fail-fast + coverage + threshold)..." npm run test:unit - $(VSCODE_TEST) - node tools/check-coverage.mjs - -_lint: - npx eslint src - -_fmt: - npx prettier --write "src/**/*.ts" - -_fmt_check: - npx prettier --check "src/**/*.ts" - -_clean: - $(RM) out coverage .vscode-test - -_coverage: - @echo "==> HTML report: coverage/index.html" - -_coverage_check: - node tools/check-coverage.mjs - -_setup: - npm ci + $(VSCODE_TEST_EXCLUDE) + $(MAKE) _coverage_check -# ============================================================================= -# HELP -# ============================================================================= +## help: List available targets help: - @echo "Available targets:" - @echo " build - Compile/assemble all artifacts" - @echo " test - Run full test suite with coverage" - @echo " lint - Run all linters (errors mode)" - @echo " fmt - Format all code in-place" - @echo " fmt-check - Check formatting (no modification)" - @echo " clean - Remove build artifacts" - @echo " check - lint + test (pre-commit)" - @echo " ci - fmt-check + lint + spellcheck + test + build + package" - @echo " coverage - Generate and open coverage report" - @echo " coverage-check - Assert coverage thresholds" - @echo " spellcheck - Run cspell spell checker" - @echo " package - Build VSIX package" - @echo " setup - Post-create dev environment setup" + @echo "Standard targets:" + @echo " build - Compile TypeScript" + @echo " test - Fail-fast tests + coverage + threshold enforcement" + @echo " lint - ESLint + cspell (read-only)" + @echo " fmt - Prettier (CHECK=1 for verify-only)" + @echo " clean - Remove build artifacts" + @echo " ci - lint + test + build" + @echo " setup - Post-create dev environment setup" + @echo "" + @echo "Repo-specific:" + @echo " package - Build VSIX package" + @echo " test-exclude-ci - Run tests excluding those tagged @exclude-ci" diff --git a/coverage-thresholds.json b/coverage-thresholds.json index cfbb862..aabde1d 100644 --- 
a/coverage-thresholds.json +++ b/coverage-thresholds.json @@ -1,6 +1,6 @@ { - "lines": 83, - "functions": 80.81, - "branches": 76.9, - "statements": 83 + "lines": 84.88, + "functions": 84.07, + "branches": 79.83, + "statements": 84.88 } diff --git a/eslint.config.mjs b/eslint.config.mjs index d84ab11..3b76c3a 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -31,6 +31,14 @@ export default tseslint.config( rules: { // ALL RULES SET TO ERROR - NO WARNINGS + // Additional critical rules (NOT enabled by any preset) + "@typescript-eslint/no-deprecated": "error", + "@typescript-eslint/consistent-return": "error", + "@typescript-eslint/prefer-literal-enum-member": "error", + "@typescript-eslint/require-array-sort-compare": "error", + // Object destructuring only — `arr[0]` is idiomatic and noisier than a destructure. + "@typescript-eslint/prefer-destructuring": ["error", { array: false, object: true }], + // TypeScript strict rules "@typescript-eslint/no-explicit-any": "error", "@typescript-eslint/explicit-function-return-type": "error", diff --git a/src/CommandTreeProvider.ts b/src/CommandTreeProvider.ts index 3b4aa0a..e7045ae 100644 --- a/src/CommandTreeProvider.ts +++ b/src/CommandTreeProvider.ts @@ -1,4 +1,5 @@ import * as vscode from "vscode"; +import { isPhonyTask, isPrivateTask } from "./models/TaskItem"; import type { CommandItem, CategoryDef } from "./models/TaskItem"; import type { CommandTreeItem } from "./models/TaskItem"; import type { DiscoveryResult } from "./discovery"; @@ -6,7 +7,7 @@ import { discoverAllTasks, flattenTasks, getExcludePatterns, CATEGORY_DEFS } fro import { TagConfig } from "./config/TagConfig"; import { logger } from "./utils/logger"; import { buildNestedFolderItems } from "./tree/folderTree"; -import { createCommandNode, createCategoryNode } from "./tree/nodeFactory"; +import { createCategoryNode, createTaskNodes } from "./tree/nodeFactory"; import { getAllRows } from "./db/db"; import type { CommandRow } from "./db/db"; import { 
getDbOrThrow } from "./db/lifecycle"; @@ -166,7 +167,7 @@ export class CommandTreeProvider implements vscode.TreeDataProvider createCommandNode(t)); + const children = createTaskNodes(sorted); return createCategoryNode({ label: `${def.label} (${tasks.length})`, children, @@ -183,15 +184,45 @@ export class CommandTreeProvider implements vscode.TreeDataProvider number { const order = this.getSortOrder(); if (order === "folder") { - return (a, b) => a.category.localeCompare(b.category) || a.label.localeCompare(b.label); + return (a, b) => + a.category.localeCompare(b.category) || + this.comparePrivateTasks(a, b) || + this.compareMakeTaskPriority(a, b) || + a.label.localeCompare(b.label); } if (order === "type") { - return (a, b) => a.type.localeCompare(b.type) || a.label.localeCompare(b.label); + return (a, b) => + a.type.localeCompare(b.type) || + this.comparePrivateTasks(a, b) || + this.compareMakeTaskPriority(a, b) || + a.label.localeCompare(b.label); } - return (a, b) => a.label.localeCompare(b.label); + return (a, b) => + this.comparePrivateTasks(a, b) || this.compareMakeTaskPriority(a, b) || a.label.localeCompare(b.label); } private applyTagFilter(tasks: CommandItem[]): CommandItem[] { diff --git a/src/discovery/docker.ts b/src/discovery/docker.ts index 89e5ff4..e01a01a 100644 --- a/src/discovery/docker.ts +++ b/src/discovery/docker.ts @@ -164,9 +164,7 @@ function parseDockerComposeServices(content: string): string[] { continue; } const indent = leadingSpaces(line); - const result = processLine({ trimmed, indent, inServices, servicesIndent, services }); - inServices = result.inServices; - servicesIndent = result.servicesIndent; + ({ inServices, servicesIndent } = processLine({ trimmed, indent, inServices, servicesIndent, services })); } return services; diff --git a/src/discovery/make.ts b/src/discovery/make.ts index 9d2f23d..652185c 100644 --- a/src/discovery/make.ts +++ b/src/discovery/make.ts @@ -1,6 +1,6 @@ import * as vscode from "vscode"; import * as path 
from "path"; -import type { CommandItem, IconDef, CategoryDef } from "../models/TaskItem"; +import type { CommandItem, MutableCommandItem, IconDef, CategoryDef } from "../models/TaskItem"; import { generateCommandId, simplifyPath } from "../models/TaskItem"; import { readFileContent } from "../utils/fileUtils"; @@ -28,6 +28,7 @@ export async function discoverMakeTargets(workspaceRoot: string, excludePatterns for (const file of allFiles) { const content = await readFileContent(file); + const phonyTargets = parsePhonyTargets(content); const targets = parseMakeTargets(content); const makeDir = path.dirname(file.fsPath); const category = simplifyPath(file.fsPath, workspaceRoot); @@ -38,7 +39,7 @@ export async function discoverMakeTargets(workspaceRoot: string, excludePatterns continue; } - commands.push({ + const command: MutableCommandItem = { id: generateCommandId("make", file.fsPath, name), label: name, type: "make", @@ -48,7 +49,13 @@ export async function discoverMakeTargets(workspaceRoot: string, excludePatterns filePath: file.fsPath, tags: [], line, - }); + }; + + if (phonyTargets.has(name)) { + command.isPhony = true; + } + + commands.push(command); } } @@ -60,6 +67,54 @@ interface MakeTarget { readonly line: number; } +function addPhonyTargets(line: string, phonyTargets: Set): void { + for (const name of line.split(/\s+/)) { + if (name !== "") { + phonyTargets.add(name); + } + } +} + +function trimContinuation(line: string): string { + return line.endsWith("\\") ? 
line.slice(0, -1).trim() : line; +} + +function isContinuationLine(line: string): boolean { + return line.endsWith("\\"); +} + +function readPhonyLine(line: string): string | undefined { + const trimmed = line.trim(); + if (!trimmed.startsWith(".PHONY:")) { + return undefined; + } + return trimmed.slice(".PHONY:".length).trim(); +} + +function parsePhonyTargets(content: string): ReadonlySet { + const phonyTargets = new Set(); + let collecting = false; + + for (const line of content.split("\n")) { + const trimmed = line.trim(); + if (collecting) { + addPhonyTargets(trimContinuation(trimmed), phonyTargets); + collecting = isContinuationLine(trimmed); + continue; + } + + const phonyLine = readPhonyLine(line); + if (phonyLine === undefined) { + continue; + } + + addPhonyTargets(trimContinuation(phonyLine), phonyTargets); + collecting = isContinuationLine(phonyLine); + } + + return phonyTargets; +} + /** * Parses Makefile to extract target names and their line numbers. */ diff --git a/src/extension.ts b/src/extension.ts index bf31431..8888d5e 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -11,6 +11,7 @@ import { createVSCodeFileSystem } from "./semantic/vscodeAdapters"; import { forceSelectModel } from "./semantic/summariser"; import { syncTagsFromConfig } from "./tags/tagSync"; import { setupFileWatchers } from "./watchers"; +import { PrivateTaskDecorationProvider } from "./tree/PrivateTaskDecorationProvider"; let treeProvider: CommandTreeProvider; let quickTasksProvider: QuickTasksProvider; @@ -26,7 +27,7 @@ export async function activate(context: vscode.ExtensionContext): Promise undefined); } - private waitForShellIntegration(terminal: vscode.Terminal, command: string): void { - let resolved = false; - const listener = vscode.window.onDidChangeTerminalShellIntegration(({ terminal: t, shellIntegration }) => { - if (t === terminal && !resolved) { - resolved = true; - listener.dispose(); - this.safeSendText(terminal, command, shellIntegration); - } + private 
async executeWhenReady(terminal: vscode.Terminal, command: string): Promise { + await terminal.processId; + await new Promise((resolve) => { + setTimeout(resolve, XTERM_PAINT_DELAY_MS); + }); + const si = terminal.shellIntegration ?? (await this.awaitShellIntegration(terminal)); + this.safeSendText(terminal, command, si); + } + + private async awaitShellIntegration(terminal: vscode.Terminal): Promise { + return await new Promise((resolve) => { + let done = false; + const listener = vscode.window.onDidChangeTerminalShellIntegration(({ terminal: t, shellIntegration }) => { + if (t === terminal && !done) { + done = true; + listener.dispose(); + resolve(shellIntegration); + } + }); + setTimeout(() => { + if (!done) { + done = true; + listener.dispose(); + resolve(undefined); + } + }, SHELL_INTEGRATION_TIMEOUT_MS); }); - setTimeout(() => { - if (!resolved) { - resolved = true; - listener.dispose(); - this.safeSendText(terminal, command); - } - }, SHELL_INTEGRATION_TIMEOUT_MS); } /** @@ -225,7 +244,7 @@ export class TaskRunner { * Builds the full command string with formatted parameters. 
*/ private buildCommand(task: CommandItem, params: Array<{ def: ParamDef; value: string }>): string { - let command = task.command; + let { command } = task; const parts: string[] = []; for (const { def, value } of params) { @@ -265,6 +284,10 @@ export class TaskRunner { case "dashdash-args": { return `-- ${value}`; } + default: { + const exhaustive: never = format; + return exhaustive; + } } } } diff --git a/src/test/e2e/commands.e2e.test.ts b/src/test/e2e/commands.e2e.test.ts index 8764d52..5cfeb46 100644 --- a/src/test/e2e/commands.e2e.test.ts +++ b/src/test/e2e/commands.e2e.test.ts @@ -335,7 +335,7 @@ suite("Commands and UI E2E Tests", () => { const packageJson = readPackageJson(); - const commands = packageJson.contributes.commands; + const { commands } = packageJson.contributes; const refreshCmd = commands.find((c) => c.command === "commandtree.refresh"); assert.ok(refreshCmd?.icon === "$(refresh)", "Refresh should have refresh icon"); diff --git a/src/test/e2e/configuration.e2e.test.ts b/src/test/e2e/configuration.e2e.test.ts index 9c1d465..a0506a7 100644 --- a/src/test/e2e/configuration.e2e.test.ts +++ b/src/test/e2e/configuration.e2e.test.ts @@ -105,8 +105,7 @@ suite("Configuration and File Watchers E2E Tests", () => { this.timeout(10000); const packageJson = readExtensionPackageJson(); - const enumDescriptions = - packageJson.contributes.configuration.properties["commandtree.sortOrder"].enumDescriptions; + const { enumDescriptions } = packageJson.contributes.configuration.properties["commandtree.sortOrder"]; assert.ok(enumDescriptions, "enumDescriptions should exist"); assert.ok(enumDescriptions.length === 3, "Should have 3 descriptions"); diff --git a/src/test/e2e/execution.e2e.test.ts b/src/test/e2e/execution.e2e.test.ts index 3f67ac1..67e0c05 100644 --- a/src/test/e2e/execution.e2e.test.ts +++ b/src/test/e2e/execution.e2e.test.ts @@ -9,6 +9,7 @@ import * as assert from "assert"; import * as vscode from "vscode"; import * as fs from "fs"; +import * as 
os from "os"; import * as path from "path"; import { activateExtension, sleep, getFixturePath, createMockTaskItem } from "../helpers/helpers"; import type { TestContext } from "../helpers/helpers"; @@ -17,6 +18,64 @@ interface PackageJson { scripts?: Record; } +interface RendererLogWatcher { + capture: () => string[]; +} + +function findCurrentRendererLog(): string | undefined { + // @vscode/test-cli sets --user-data-dir to .vscode-test/user-data; logs go under logs//window1/renderer.log + const searchRoots = [ + path.resolve(process.cwd(), ".vscode-test/user-data/logs"), + path.resolve(process.cwd(), "../.vscode-test/user-data/logs"), + path.resolve(process.cwd(), "../../.vscode-test/user-data/logs"), + ]; + for (const root of searchRoots) { + if (!fs.existsSync(root)) { + continue; + } + const sessions = fs + .readdirSync(root) + .filter((n) => /^\d{8}T\d{6}$/.test(n)) + .sort(); + for (let i = sessions.length - 1; i >= 0; i--) { + const logPath = path.join(root, sessions[i] ?? "", "window1", "renderer.log"); + if (fs.existsSync(logPath)) { + return logPath; + } + } + } + return undefined; +} + +function watchRendererLog(): RendererLogWatcher { + const logPath = findCurrentRendererLog(); + const baselineSize = logPath !== undefined && fs.existsSync(logPath) ? fs.statSync(logPath).size : 0; + return { + capture: (): string[] => { + if (logPath === undefined || !fs.existsSync(logPath)) { + throw new Error( + `Renderer log not found — cannot verify absence of xterm errors. 
Searched under .vscode-test/user-data/logs` + ); + } + const fd = fs.openSync(logPath, "r"); + try { + const currentSize = fs.statSync(logPath).size; + const length = Math.max(0, currentSize - baselineSize); + if (length === 0) { + return []; + } + const buf = Buffer.alloc(length); + fs.readSync(fd, buf, 0, length, baselineSize); + const text = buf.toString("utf8"); + const lines = text.split("\n"); + return lines.filter((l) => l.includes("dimensions") || l.includes("TypeError")); + } finally { + fs.closeSync(fd); + } + }, + }; +} + // Spec: command-execution suite("Command Execution E2E Tests", () => { let context: TestContext; @@ -139,7 +198,7 @@ suite("Command Execution E2E Tests", () => { this.timeout(10000); const packageJson = JSON.parse(fs.readFileSync(getFixturePath("package.json"), "utf8")) as PackageJson; - const scripts = packageJson.scripts; + const { scripts } = packageJson; assert.ok(scripts !== undefined, "Should have scripts object"); assert.ok(scripts["build"] !== undefined, "Should have build script"); @@ -622,7 +681,7 @@ suite("Command Execution E2E Tests", () => { // Spec: command-execution suite("Terminal Execution Modes", () => { test("runInCurrentTerminal creates terminal when none exists", async function () { - this.timeout(15000); + this.timeout(20000); for (const t of vscode.window.terminals) { t.dispose(); @@ -632,25 +691,73 @@ suite("Command Execution E2E Tests", () => { const initialCount = vscode.window.terminals.length; assert.strictEqual(initialCount, 0, "Should start with no terminals"); + const markerFile = path.join(os.tmpdir(), `commandtree-test-create-${Date.now()}-${Math.random()}.marker`); + if (fs.existsSync(markerFile)) { + fs.unlinkSync(markerFile); + } + const shellTask = createMockTaskItem({ type: "shell", label: "Create Terminal Test", - command: 'echo "terminal created"', + command: `echo created > "${markerFile}"`, cwd: context.workspaceRoot, filePath: path.join(context.workspaceRoot, "scripts/test.sh"), }); const 
commandTreeItem = { data: shellTask }; - await vscode.commands.executeCommand("commandtree.runInCurrentTerminal", commandTreeItem); - await sleep(1500); + const rendererLog = watchRendererLog(); + + const shellExecEvents: string[] = []; + const shellExecListener = vscode.window.onDidStartTerminalShellExecution((e) => { + shellExecEvents.push(e.execution.commandLine.value); + }); + const terminalStates: string[] = []; + const stateListener = vscode.window.onDidChangeTerminalState((t) => { + terminalStates.push(`${t.name}:interacted=${String(t.state.isInteractedWith)}`); + }); + + try { + await vscode.commands.executeCommand("commandtree.runInCurrentTerminal", commandTreeItem); + await sleep(5000); + } finally { + shellExecListener.dispose(); + stateListener.dispose(); + } const finalCount = vscode.window.terminals.length; assert.ok(finalCount >= 1, "Should create a terminal when none exists"); + assert.strictEqual(finalCount, 1, "Should create exactly one terminal when none existed"); assert.ok(vscode.window.activeTerminal !== undefined, "Created terminal should be active"); + assert.ok( + vscode.window.activeTerminal.name.startsWith("CommandTree:"), + `Active terminal should be the CommandTree one, got: ${vscode.window.activeTerminal.name}` + ); + assert.ok( + vscode.window.activeTerminal.name.includes("Create Terminal Test"), + `Active terminal name should include task label, got: ${vscode.window.activeTerminal.name}` + ); + assert.strictEqual(vscode.window.activeTerminal.exitStatus, undefined, "Terminal should still be running"); + + const rendererErrors = rendererLog.capture(); + assert.deepStrictEqual( + rendererErrors, + [], + `VS Code renderer.log must not contain xterm 'dimensions' or TypeError lines from this command. Got ${rendererErrors.length} line(s):\n${rendererErrors.join("\n")}` + ); + + assert.ok( + fs.existsSync(markerFile), + `Marker file should exist at ${markerFile} — proves the shell command actually executed. 
If sendText threw the xterm 'dimensions' TypeError, this file is never written.` + ); + const markerContents = fs.readFileSync(markerFile, "utf8").trim(); + assert.strictEqual(markerContents, "created", "Marker file should contain the echoed text 'created'"); + const markerStat = fs.statSync(markerFile); + assert.ok(markerStat.size > 0, "Marker file should be non-empty (shell command produced output)"); + fs.unlinkSync(markerFile); }); test("runInCurrentTerminal reuses existing active terminal", async function () { - this.timeout(15000); + this.timeout(20000); const existingTerminal = vscode.window.createTerminal("Existing Test Terminal"); existingTerminal.show(); @@ -658,20 +765,61 @@ suite("Command Execution E2E Tests", () => { const terminalCountBefore = vscode.window.terminals.length; + const markerFile = path.join(os.tmpdir(), `commandtree-test-reuse-${Date.now()}-${Math.random()}.marker`); + if (fs.existsSync(markerFile)) { + fs.unlinkSync(markerFile); + } + const shellTask = createMockTaskItem({ type: "shell", label: "Reuse Terminal Test", - command: 'echo "reusing terminal"', + command: `echo reused > "${markerFile}"`, cwd: context.workspaceRoot, filePath: path.join(context.workspaceRoot, "scripts/test.sh"), }); const commandTreeItem = { data: shellTask }; - await vscode.commands.executeCommand("commandtree.runInCurrentTerminal", commandTreeItem); - await sleep(1000); + const rendererLog = watchRendererLog(); + + const shellExecEvents: string[] = []; + const shellExecListener = vscode.window.onDidStartTerminalShellExecution((e) => { + shellExecEvents.push(e.execution.commandLine.value); + }); + + try { + await vscode.commands.executeCommand("commandtree.runInCurrentTerminal", commandTreeItem); + await sleep(5000); + } finally { + shellExecListener.dispose(); + } const terminalCountAfter = vscode.window.terminals.length; assert.strictEqual(terminalCountAfter, terminalCountBefore, "Should reuse existing terminal, not create new one"); + 
assert.ok(vscode.window.activeTerminal !== undefined, "An active terminal should exist after reuse"); + assert.strictEqual( + vscode.window.activeTerminal.name, + "Existing Test Terminal", + "Reused terminal must be the pre-existing one (its name is preserved — no new CommandTree terminal created)" + ); + + const rendererErrors = rendererLog.capture(); + assert.deepStrictEqual( + rendererErrors, + [], + `VS Code renderer.log must not contain xterm 'dimensions' or TypeError lines during terminal reuse. Got ${rendererErrors.length} line(s):\n${rendererErrors.join("\n")}` + ); + + assert.ok( + fs.existsSync(markerFile), + `Marker file should exist at ${markerFile} — proves the shell command actually executed in the reused terminal. If sendText threw the xterm 'dimensions' TypeError, this file is never written.` + ); + const markerContents = fs.readFileSync(markerFile, "utf8").trim(); + assert.strictEqual(markerContents, "reused", "Marker file should contain the echoed text 'reused'"); + const markerStat = fs.statSync(markerFile); + assert.ok(markerStat.size > 0, "Marker file should be non-empty (shell command produced output)"); + fs.unlinkSync(markerFile); + + existingTerminal.dispose(); }); test("new terminal has CommandTree prefix in name", async function () { @@ -695,7 +843,7 @@ suite("Command Execution E2E Tests", () => { await vscode.commands.executeCommand("commandtree.run", commandTreeItem); await sleep(3000); - const terminals = vscode.window.terminals; + const { terminals } = vscode.window; const commandTreeTerminal = terminals.find((t) => t.name.includes("CommandTree")); assert.ok( commandTreeTerminal !== undefined, diff --git a/src/test/e2e/markdown.e2e.test.ts b/src/test/e2e/markdown.e2e.test.ts index 7c690a4..3ff3c2d 100644 --- a/src/test/e2e/markdown.e2e.test.ts +++ b/src/test/e2e/markdown.e2e.test.ts @@ -80,7 +80,7 @@ suite("Markdown Discovery and Preview E2E Tests", () => { assert.ok(readmeItem, "Should find README.md item"); 
assert.ok(isCommandItem(readmeItem.data), "README.md must be a command node"); - const description = readmeItem.data.description; + const { description } = readmeItem.data; assert.ok(description !== undefined && description.length > 0, "Should have a description"); assert.ok(description.includes("Test Project Documentation"), "Description should come from first heading"); }); @@ -103,7 +103,7 @@ suite("Markdown Discovery and Preview E2E Tests", () => { assert.ok(readmeItem, "Should find README.md item"); assert.ok(isCommandItem(readmeItem.data), "README.md must be a command node"); - const filePath = readmeItem.data.filePath; + const { filePath } = readmeItem.data; assert.ok(filePath.length > 0, "Should have a file path"); assert.ok(filePath.endsWith("README.md"), "File path should end with README.md"); }); @@ -192,7 +192,7 @@ suite("Markdown Discovery and Preview E2E Tests", () => { assert.ok(readmeItem, "Should find README.md item"); - const contextValue = readmeItem.contextValue; + const { contextValue } = readmeItem; assert.ok(contextValue?.includes("markdown") === true, "Context value should include 'markdown'"); }); diff --git a/src/test/e2e/runner.e2e.test.ts b/src/test/e2e/runner.e2e.test.ts index ce9c8ae..88577bc 100644 --- a/src/test/e2e/runner.e2e.test.ts +++ b/src/test/e2e/runner.e2e.test.ts @@ -658,11 +658,11 @@ suite("Command Runner E2E Tests", () => { }); assert.ok(task.params !== undefined, "Should have params"); - const params = task.params; + const { params } = task; const param = params[0]; assert.ok(param !== undefined, "Should have param"); assert.ok(param.options !== undefined, "Param should have options"); - const options = param.options; + const { options } = param; assert.strictEqual(options.length, 3, "Should have 3 options"); }); @@ -963,7 +963,7 @@ suite("Command Runner E2E Tests", () => { await sleep(4000); assert.ok(vscode.window.terminals.length > 0, "Terminal should exist after running command"); - const activeTerminal = 
vscode.window.activeTerminal; + const { activeTerminal } = vscode.window; assert.ok(activeTerminal !== undefined, "Should have active terminal"); assert.strictEqual( activeTerminal.exitStatus, diff --git a/src/test/e2e/treeview.e2e.test.ts b/src/test/e2e/treeview.e2e.test.ts index 167e5d7..b16b64b 100644 --- a/src/test/e2e/treeview.e2e.test.ts +++ b/src/test/e2e/treeview.e2e.test.ts @@ -15,6 +15,9 @@ import { getLabelString, collectLeafItems, collectLeafTasks, + refreshTasks, + writeFile, + deleteFile, } from "../helpers/helpers"; import { type CommandTreeItem, isCommandItem } from "../../models/TaskItem"; @@ -245,4 +248,262 @@ suite("TreeView E2E Tests", () => { ); }); }); + + suite("Private Make And Mise Tasks", () => { + const makeRelativePath = "private-targets/Makefile"; + const miseRelativePath = "private-targets/mise.toml"; + const privateDivider = "─────────────────────────"; + const publicLabels = ["alpha_public", "zeta_public"]; + const privateLabels = ["_beta_private", "_omega_private"]; + + function getThemeColorId(item: CommandTreeItem): string | undefined { + const { iconPath } = item; + return iconPath instanceof vscode.ThemeIcon ? 
iconPath.color?.id : undefined; + } + + async function getItemsForFile(type: "make" | "mise", relativePath: string): Promise { + const provider = getCommandTreeProvider(); + const items = await collectLeafItems(provider); + return items.filter( + (item) => isCommandItem(item.data) && item.data.type === type && item.data.filePath.endsWith(relativePath) + ); + } + + async function getFolderChildrenForCategory( + categoryLabel: string, + folderLabel: string + ): Promise { + const provider = getCommandTreeProvider(); + const categories = await provider.getChildren(); + const category = categories.find((item) => getLabelString(item.label).includes(categoryLabel)); + assert.ok(category !== undefined, `Should find category ${categoryLabel}`); + + const children = await provider.getChildren(category); + const folder = children.find((item) => getLabelString(item.label) === folderLabel); + assert.ok(folder !== undefined, `Should find folder ${folderLabel}`); + + return await provider.getChildren(folder); + } + + setup(async function () { + this.timeout(15000); + + writeFile( + makeRelativePath, + [ + "alpha_public:", + "\t@echo alpha", + "", + "zeta_public:", + "\t@echo zeta", + "", + "_beta_private:", + "\t@echo beta", + "", + "_omega_private:", + "\t@echo omega", + ].join("\n") + ); + + writeFile( + miseRelativePath, + [ + "[tasks.alpha_public]", + 'run = "echo alpha"', + "", + "[tasks.zeta_public]", + 'run = "echo zeta"', + "", + "[tasks._beta_private]", + 'run = "echo beta"', + "", + "[tasks._omega_private]", + 'run = "echo omega"', + ].join("\n") + ); + + await refreshTasks(); + }); + + teardown(async function () { + this.timeout(15000); + deleteFile(makeRelativePath); + deleteFile(miseRelativePath); + await refreshTasks(); + }); + + test("make private targets sort after public ones and render muted", async function () { + this.timeout(15000); + + const items = await getItemsForFile("make", makeRelativePath); + const labels = items.map((item) => 
getLabelString(item.label)); + const folderChildren = await getFolderChildrenForCategory("Make Targets", "private-targets"); + const folderLabels = folderChildren.map((item) => getLabelString(item.label)); + + assert.deepStrictEqual( + folderLabels, + [...publicLabels, privateDivider, ...privateLabels], + "Make targets should insert a divider between public and _-prefixed private targets" + ); + + assert.deepStrictEqual( + labels, + [...publicLabels, ...privateLabels], + "Make targets should keep public targets first and move _-prefixed private targets below them" + ); + + const privateItems = items.filter((item) => privateLabels.includes(getLabelString(item.label))); + assert.strictEqual(privateItems.length, privateLabels.length, "Should find all private make targets"); + + for (const item of privateItems) { + const description = typeof item.description === "string" ? item.description : ""; + assert.ok( + description.includes("private"), + `Private make target ${getLabelString(item.label)} should be visibly marked as private` + ); + assert.strictEqual( + getThemeColorId(item), + "descriptionForeground", + `Private make target ${getLabelString(item.label)} should use a muted icon color` + ); + assert.strictEqual( + item.resourceUri?.scheme, + "commandtree-private", + `Private make target ${getLabelString(item.label)} must carry a private resourceUri so the label renders muted` + ); + } + }); + + test("mise private tasks sort after public ones and render muted", async function () { + this.timeout(15000); + + const items = await getItemsForFile("mise", miseRelativePath); + const labels = items.map((item) => getLabelString(item.label)); + const folderChildren = await getFolderChildrenForCategory("Mise Tasks", "private-targets"); + const folderLabels = folderChildren.map((item) => getLabelString(item.label)); + + assert.deepStrictEqual( + folderLabels, + [...publicLabels, privateDivider, ...privateLabels], + "Mise tasks should insert a divider between public and 
_-prefixed private tasks" + ); + + assert.deepStrictEqual( + labels, + [...publicLabels, ...privateLabels], + "Mise tasks should keep public tasks first and move _-prefixed private tasks below them" + ); + + const privateItems = items.filter((item) => privateLabels.includes(getLabelString(item.label))); + assert.strictEqual(privateItems.length, privateLabels.length, "Should find all private mise tasks"); + + for (const item of privateItems) { + const description = typeof item.description === "string" ? item.description : ""; + assert.ok( + description.includes("private"), + `Private mise task ${getLabelString(item.label)} should be visibly marked as private` + ); + assert.strictEqual( + getThemeColorId(item), + "descriptionForeground", + `Private mise task ${getLabelString(item.label)} should use a muted icon color` + ); + assert.strictEqual( + item.resourceUri?.scheme, + "commandtree-private", + `Private mise task ${getLabelString(item.label)} must carry a private resourceUri so the label renders muted` + ); + } + }); + }); + + suite("Make Target Conventions", () => { + const makeRelativePath = "make-conventions/Makefile"; + const privateDivider = "─────────────────────────"; + + async function getFolderChildrenForCategory( + categoryLabel: string, + folderLabel: string + ): Promise { + const provider = getCommandTreeProvider(); + const categories = await provider.getChildren(); + const category = categories.find((item) => getLabelString(item.label).includes(categoryLabel)); + assert.ok(category !== undefined, `Should find category ${categoryLabel}`); + + const children = await provider.getChildren(category); + const folder = children.find((item) => getLabelString(item.label) === folderLabel); + assert.ok(folder !== undefined, `Should find folder ${folderLabel}`); + + return await provider.getChildren(folder); + } + + async function getMakeItemsForFile(relativePath: string): Promise { + const provider = getCommandTreeProvider(); + const items = await 
collectLeafItems(provider); + return items.filter( + (item) => isCommandItem(item.data) && item.data.type === "make" && item.data.filePath.endsWith(relativePath) + ); + } + + setup(async function () { + this.timeout(15000); + + writeFile( + makeRelativePath, + [ + ".PHONY: help build _private", + "", + "aaa_file:", + '\t@echo "file target"', + "", + "help:", + '\t@echo "help target"', + "", + "build:", + '\t@echo "build target"', + "", + "%.o: %.c", + '\t@echo "pattern rule"', + "", + ".DEFAULT:", + '\t@echo "special target"', + "", + "_private:", + '\t@echo "private target"', + ].join("\n") + ); + + await refreshTasks(); + }); + + teardown(async function () { + this.timeout(15000); + deleteFile(makeRelativePath); + await refreshTasks(); + }); + + test("make help is pinned to the top, phony targets sort before non-phony ones, and special targets stay hidden", async function () { + this.timeout(15000); + + const folderChildren = await getFolderChildrenForCategory("Make Targets", "make-conventions"); + const folderLabels = folderChildren.map((item) => getLabelString(item.label)); + const items = await getMakeItemsForFile(makeRelativePath); + const labels = items.map((item) => getLabelString(item.label)); + + assert.deepStrictEqual( + folderLabels, + ["help", "build", "aaa_file", privateDivider, "_private"], + "Make targets should pin help first, prefer phony public targets over non-phony ones, and separate private targets" + ); + + assert.deepStrictEqual( + labels, + ["help", "build", "aaa_file", "_private"], + "Only invokable make targets should remain after hiding special and pattern rules" + ); + + assert.ok(!labels.includes("%.o"), "Pattern rules should be hidden from Make discovery"); + assert.ok(!labels.includes(".DEFAULT"), "Dot-prefixed special targets should be hidden from Make discovery"); + }); + }); }); diff --git a/src/test/helpers/helpers.ts b/src/test/helpers/helpers.ts index fde35e2..b834776 100644 --- a/src/test/helpers/helpers.ts +++ 
b/src/test/helpers/helpers.ts @@ -24,7 +24,7 @@ export async function activateExtension(): Promise { await extension.activate(); } - const workspaceFolders = vscode.workspace.workspaceFolders; + const { workspaceFolders } = vscode.workspace; if (!workspaceFolders || workspaceFolders.length === 0) { throw new Error("No workspace folder open"); } @@ -66,7 +66,7 @@ export async function sleep(ms: number): Promise { } export function getFixturePath(relativePath: string): string { - const workspaceFolders = vscode.workspace.workspaceFolders; + const { workspaceFolders } = vscode.workspace; if (!workspaceFolders || workspaceFolders.length === 0) { throw new Error("No workspace folder open"); } diff --git a/src/test/unit/taskRunner.unit.test.ts b/src/test/unit/taskRunner.unit.test.ts index 9811898..abe52cb 100644 --- a/src/test/unit/taskRunner.unit.test.ts +++ b/src/test/unit/taskRunner.unit.test.ts @@ -32,6 +32,10 @@ function formatParam(def: ParamDef, value: string): string { case "dashdash-args": { return `-- ${value}`; } + default: { + const exhaustive: never = format; + return exhaustive; + } } } diff --git a/src/tree/PrivateTaskDecorationProvider.ts b/src/tree/PrivateTaskDecorationProvider.ts new file mode 100644 index 0000000..c890558 --- /dev/null +++ b/src/tree/PrivateTaskDecorationProvider.ts @@ -0,0 +1,18 @@ +import * as vscode from "vscode"; + +export const PRIVATE_TASK_URI_SCHEME = "commandtree-private"; + +const PRIVATE_TASK_COLOR = new vscode.ThemeColor("descriptionForeground"); + +export class PrivateTaskDecorationProvider implements vscode.FileDecorationProvider { + public provideFileDecoration(uri: vscode.Uri): vscode.FileDecoration | undefined { + if (uri.scheme !== PRIVATE_TASK_URI_SCHEME) { + return undefined; + } + return { color: PRIVATE_TASK_COLOR, tooltip: "Private task" }; + } +} + +export function buildPrivateTaskUri(taskId: string): vscode.Uri { + return vscode.Uri.parse(`${PRIVATE_TASK_URI_SCHEME}:/${encodeURIComponent(taskId)}`); +} diff 
--git a/src/tree/folderTree.ts b/src/tree/folderTree.ts index d3b8433..3e6bcfb 100644 --- a/src/tree/folderTree.ts +++ b/src/tree/folderTree.ts @@ -2,7 +2,7 @@ import type { CommandItem } from "../models/TaskItem"; import type { CommandTreeItem } from "../models/TaskItem"; import type { DirNode } from "./dirTree"; import { groupByFullDir, buildDirTree, needsFolderWrapper, getFolderLabel } from "./dirTree"; -import { createCommandNode, createFolderNode } from "./nodeFactory"; +import { createFolderNode, createTaskNodes } from "./nodeFactory"; /** * Renders a DirNode as a folder CommandTreeItem. @@ -20,7 +20,7 @@ function renderFolder({ }): CommandTreeItem { const label = getFolderLabel(node.dir, parentDir); const folderId = `${parentTreeId}/${label}`; - const taskItems = sortTasks(node.tasks).map((t) => createCommandNode(t)); + const taskItems = createTaskNodes(sortTasks(node.tasks)); const subItems = node.subdirs.map((sub) => renderFolder({ node: sub, @@ -66,7 +66,7 @@ export function buildNestedFolderItems({ }) ); } - result.push(...sortTasks(node.tasks).map((t) => createCommandNode(t))); + result.push(...createTaskNodes(sortTasks(node.tasks))); } else if (needsFolderWrapper(node, rootNodes.length)) { result.push( renderFolder({ @@ -77,7 +77,7 @@ export function buildNestedFolderItems({ }) ); } else { - result.push(...sortTasks(node.tasks).map((t) => createCommandNode(t))); + result.push(...createTaskNodes(sortTasks(node.tasks))); } } diff --git a/src/tree/nodeFactory.ts b/src/tree/nodeFactory.ts index 0fdd319..6fe4a31 100644 --- a/src/tree/nodeFactory.ts +++ b/src/tree/nodeFactory.ts @@ -1,14 +1,25 @@ import * as vscode from "vscode"; +import { isPrivateTask } from "../models/TaskItem"; import type { CommandItem, CommandType, IconDef } from "../models/TaskItem"; import { CommandTreeItem } from "../models/TaskItem"; import { ICON_REGISTRY } from "../discovery"; +import { buildPrivateTaskUri } from "./PrivateTaskDecorationProvider"; const DEFAULT_FOLDER_ICON = new 
vscode.ThemeIcon("folder"); +const PRIVATE_TASK_COLOR = new vscode.ThemeColor("descriptionForeground"); +const PRIVATE_TASK_DIVIDER = "─────────────────────────"; function toThemeIcon(def: IconDef): vscode.ThemeIcon { return new vscode.ThemeIcon(def.icon, new vscode.ThemeColor(def.color)); } +function getTaskIcon(task: CommandItem): vscode.ThemeIcon { + if (isPrivateTask(task)) { + return new vscode.ThemeIcon(ICON_REGISTRY[task.type].icon, PRIVATE_TASK_COLOR); + } + return toThemeIcon(ICON_REGISTRY[task.type]); +} + function resolveContextValue(task: CommandItem): string { const isQuick = task.tags.includes("quick"); const isMarkdown = task.type === "markdown"; @@ -48,8 +59,9 @@ function buildTooltip(task: CommandItem): vscode.MarkdownString { } function buildDescription(task: CommandItem): string { + const privateMarker = isPrivateTask(task) ? " private" : ""; const tagStr = task.tags.length > 0 ? ` [${task.tags.join(", ")}]` : ""; - return `${task.category}${tagStr}`; + return `${task.category}${privateMarker}${tagStr}`; } export function createCommandNode(task: CommandItem): CommandTreeItem { @@ -62,7 +74,7 @@ export function createCommandNode(task: CommandItem): CommandTreeItem { id: task.id, contextValue: resolveContextValue(task), tooltip: buildTooltip(task), - iconPath: toThemeIcon(ICON_REGISTRY[task.type]), + iconPath: getTaskIcon(task), description: buildDescription(task), command: { command: "vscode.open", @@ -72,9 +84,21 @@ export function createCommandNode(task: CommandItem): CommandTreeItem { ? [vscode.Uri.file(task.filePath), { selection: new vscode.Range(task.line - 1, 0, task.line - 1, 0) }] : [vscode.Uri.file(task.filePath)], }, + ...(isPrivateTask(task) ? 
{ resourceUri: buildPrivateTaskUri(task.id) } : {}), }); } +export function createTaskNodes(tasks: CommandItem[]): CommandTreeItem[] { + const firstPrivateIndex = tasks.findIndex((task) => isPrivateTask(task)); + if (firstPrivateIndex <= 0 || firstPrivateIndex === tasks.length) { + return tasks.map((task) => createCommandNode(task)); + } + + const publicNodes = tasks.slice(0, firstPrivateIndex).map((task) => createCommandNode(task)); + const privateNodes = tasks.slice(firstPrivateIndex).map((task) => createCommandNode(task)); + return [...publicNodes, createDividerNode(PRIVATE_TASK_DIVIDER), ...privateNodes]; +} + export function createCategoryNode({ label, children, @@ -122,3 +146,13 @@ export function createPlaceholderNode(message: string): CommandTreeItem { contextValue: "placeholder", }); } + +export function createDividerNode(label: string): CommandTreeItem { + return new CommandTreeItem({ + label, + data: { nodeType: "folder" }, + children: [], + id: `divider:${label}`, + contextValue: "divider", + }); +} diff --git a/tools/check-coverage.mjs b/tools/check-coverage.mjs index 4fe3ccf..f994b91 100644 --- a/tools/check-coverage.mjs +++ b/tools/check-coverage.mjs @@ -1,3 +1,4 @@ +// agent-pmo:424c8f8 import { readFileSync, existsSync, readdirSync, writeFileSync } from 'fs'; const METRICS = ['lines', 'functions', 'branches', 'statements'];