diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 99216af..0c31c4d 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -28,6 +28,23 @@ jobs:
       - name: Extract version
         id: get_version
         run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> "$GITHUB_OUTPUT"
+
+      - name: Verify release metadata matches tag
+        env:
+          TAG_NAME: ${{ steps.get_version.outputs.VERSION }}
+        run: |
+          set -euo pipefail
+          expected_version="${TAG_NAME#v}"
+          cargo_version="$(python3 -c "import tomllib; from pathlib import Path; print(tomllib.loads(Path('Cargo.toml').read_text())['package']['version'])")"
+          chart_app_version="$(python3 -c "import re; from pathlib import Path; c=Path('charts/diffscope/Chart.yaml').read_text(); m=re.search(r'^appVersion:\s*\"?(.*?)\"?\s*\$', c, re.MULTILINE); (exit('Chart.yaml missing appVersion') if not m else print(m.group(1)))")"
+          test "$cargo_version" = "$expected_version" || {
+            echo "Cargo.toml version ($cargo_version) does not match tag ($expected_version)"
+            exit 1
+          }
+          test "$chart_app_version" = "$expected_version" || {
+            echo "Chart appVersion ($chart_app_version) does not match tag ($expected_version)"
+            exit 1
+          }
 
       - name: Create Release
         id: create_release
@@ -264,4 +281,4 @@ jobs:
       - name: Upload SBOM to release
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: gh release upload ${{ github.ref_name }} sbom-diffscope.spdx.json --clobber || true
\ No newline at end of file
+        run: gh release upload ${{ github.ref_name }} sbom-diffscope.spdx.json --clobber || true
diff --git a/charts/diffscope/Chart.yaml b/charts/diffscope/Chart.yaml
index 6abd2c4..66017e0 100644
--- a/charts/diffscope/Chart.yaml
+++ b/charts/diffscope/Chart.yaml
@@ -3,7 +3,7 @@ name: diffscope
 description: AI-powered code review engine with smart analysis and professional reporting
 type: application
 version: 0.1.0
-appVersion: "0.5.3"
+appVersion: "0.5.26"
 home: https://github.com/evalops/diffscope
 sources:
   - https://github.com/evalops/diffscope
diff --git a/eval/dag-runtime-smoke-or.json b/eval/dag-runtime-smoke-or.json
index 6213b78..b970318 100644
--- a/eval/dag-runtime-smoke-or.json
+++ b/eval/dag-runtime-smoke-or.json
@@ -2195,4 +2195,4 @@
       ]
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/eval/frontier-e2e-or.json b/eval/frontier-e2e-or.json
index a6e1aa1..bec7f0f 100644
--- a/eval/frontier-e2e-or.json
+++ b/eval/frontier-e2e-or.json
@@ -9981,4 +9981,4 @@
       ]
     }
  ]
-}
\ No newline at end of file
+}
diff --git a/eval/frontier-smoke-or.json b/eval/frontier-smoke-or.json
index b84535e..b263e4b 100644
--- a/eval/frontier-smoke-or.json
+++ b/eval/frontier-smoke-or.json
@@ -5904,4 +5904,4 @@
       ]
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/src/adapters/anthropic.rs b/src/adapters/anthropic.rs
index b7cdfaf..7884f35 100644
--- a/src/adapters/anthropic.rs
+++ b/src/adapters/anthropic.rs
@@ -407,8 +407,7 @@ mod tests {
         let err_msg = format!("{:#}", result.unwrap_err());
         assert!(
             err_msg.contains("401") || err_msg.contains("Unauthorized"),
-            "Error should mention 401 or Unauthorized, got: {}",
-            err_msg
+            "Error should mention 401 or Unauthorized, got: {err_msg}"
         );
         mock.assert_async().await;
     }
@@ -502,8 +501,7 @@ mod tests {
         let err = result.unwrap_err().to_string();
         assert!(
             err.contains("Unsupported content type"),
-            "Error should mention unsupported type, got: {}",
-            err
+            "Error should mention unsupported type, got: {err}"
         );
     }
 
@@ -534,8 +532,7 @@ mod tests {
         let err = result.unwrap_err().to_string();
         assert!(
             err.contains("empty content"),
-            "Error should mention empty content: {}",
-            err
+            "Error should mention empty content: {err}"
         );
     }
 
diff --git a/src/adapters/ollama.rs b/src/adapters/ollama.rs
index f66da52..d38d0c4 100644
--- a/src/adapters/ollama.rs
+++ b/src/adapters/ollama.rs
@@ -211,11 +211,10 @@ mod tests {
     fn chat_response_body(content: &str, model: &str, done: bool) -> String {
         format!(
             r#"{{
-            "message": {{"role": "assistant", "content": "{}"}},
-            "model": "{}",
-            "done": {}
-            }}"#,
-            content, model, done
+            "message": {{"role": "assistant", "content": "{content}"}},
+            "model": "{model}",
+            "done": {done}
+            }}"#
         )
     }
 
@@ -227,13 +226,12 @@ mod tests {
     ) -> String {
         format!(
             r#"{{
-            "message": {{"role": "assistant", "content": "{}"}},
-            "model": "{}",
+            "message": {{"role": "assistant", "content": "{content}"}},
+            "model": "{model}",
             "done": true,
-            "prompt_eval_count": {},
-            "eval_count": {}
-            }}"#,
-            content, model, prompt_eval, eval
+            "prompt_eval_count": {prompt_eval},
+            "eval_count": {eval}
+            }}"#
         )
     }
 
diff --git a/src/adapters/openai.rs b/src/adapters/openai.rs
index 979defd..9d74c94 100644
--- a/src/adapters/openai.rs
+++ b/src/adapters/openai.rs
@@ -771,8 +771,7 @@ mod tests {
         let err_msg = format!("{:#}", result.unwrap_err());
         assert!(
             err_msg.contains("401") || err_msg.contains("Unauthorized"),
-            "Error should mention 401 or Unauthorized, got: {}",
-            err_msg
+            "Error should mention 401 or Unauthorized, got: {err_msg}"
         );
         mock.assert_async().await;
     }
@@ -814,8 +813,7 @@ mod tests {
         let err_msg = format!("{:#}", result.unwrap_err());
         assert!(
             err_msg.contains("429") || err_msg.contains("Rate limited"),
-            "Error should mention rate limiting, got: {}",
-            err_msg
+            "Error should mention rate limiting, got: {err_msg}"
         );
         mock.assert_async().await;
     }
@@ -866,8 +864,7 @@ mod tests {
         let err = result.unwrap_err().to_string();
         assert!(
             err.contains("empty choices"),
-            "Error should mention empty choices: {}",
-            err
+            "Error should mention empty choices: {err}"
         );
     }
 
diff --git a/src/commands/doctor/command/display/config.rs b/src/commands/doctor/command/display/config.rs
index bfe7a98..c4583a2 100644
--- a/src/commands/doctor/command/display/config.rs
+++ b/src/commands/doctor/command/display/config.rs
@@ -27,17 +27,14 @@ pub(in super::super) fn print_configuration(config: &Config) {
         }
     );
     if let Some(cw) = config.context_window {
-        println!("  Context: {} tokens", cw);
+        println!("  Context: {cw} tokens");
     }
     println!();
 }
 
 pub(in super::super) fn print_unreachable(base_url: &str) -> Result<()> {
     println!("UNREACHABLE");
-    println!(
-        "\nCannot reach {}. Make sure your LLM server is running.",
-        base_url
-    );
+    println!("\nCannot reach {base_url}. Make sure your LLM server is running.");
     println!("\nQuick start:");
     println!("  Ollama: ollama serve");
     println!("  vLLM: vllm serve <model>");
diff --git a/src/commands/doctor/command/display/endpoint.rs b/src/commands/doctor/command/display/endpoint.rs
index 962088e..2bef90e 100644
--- a/src/commands/doctor/command/display/endpoint.rs
+++ b/src/commands/doctor/command/display/endpoint.rs
@@ -1,7 +1,7 @@
 use crate::core::offline::LocalModel;
 
 pub(in super::super) fn print_endpoint_models(endpoint_type: &str, models: &[LocalModel]) {
-    println!("\nEndpoint type: {}", endpoint_type);
+    println!("\nEndpoint type: {endpoint_type}");
     println!("\nAvailable models ({}):", models.len());
     if models.is_empty() {
         println!("  (none found)");
@@ -25,7 +25,7 @@ fn format_model_size_info(model: &LocalModel) -> String {
         + &model
             .quantization
             .as_ref()
-            .map(|quantization| format!(", {}", quantization))
+            .map(|quantization| format!(", {quantization}"))
             .unwrap_or_default()
         + ")"
 }
diff --git a/src/commands/doctor/command/display/inference.rs b/src/commands/doctor/command/display/inference.rs
index 83a0137..0e7dbd4 100644
--- a/src/commands/doctor/command/display/inference.rs
+++ b/src/commands/doctor/command/display/inference.rs
@@ -9,13 +9,10 @@ pub(in super::super) fn print_recommended_model_summary(
     readiness: &ReadinessCheck,
 ) {
     println!("\nRecommended for code review: {}", recommended.name);
-    println!("  Estimated RAM: ~{}MB", estimated_ram_mb);
+    println!("  Estimated RAM: ~{estimated_ram_mb}MB");
 
     if let Some(ctx_size) = detected_context_window {
-        println!(
-            "  Context window: {} tokens (detected from model)",
-            ctx_size
-        );
+        println!("  Context window: {ctx_size} tokens (detected from model)");
     }
 
     if readiness.ready {
@@ -23,7 +20,7 @@ pub(in super::super) fn print_recommended_model_summary(
     } else {
         println!("\nStatus: NOT READY");
         for warning in &readiness.warnings {
-            println!("  Warning: {}", warning);
+            println!("  Warning: {warning}");
         }
     }
 }
@@ -41,14 +38,11 @@ pub(in super::super) fn print_inference_success(elapsed: Duration, tokens_per_se
 
 pub(in super::super) fn print_inference_failure(error: &impl std::fmt::Display) {
     println!("FAILED");
-    println!("  Error: {}", error);
+    println!("  Error: {error}");
     println!("  The model may still be loading. Try again in a moment.");
 }
 
 pub(in super::super) fn print_usage(base_url: &str, model_flag: &str) {
     println!("\nUsage:");
-    println!(
-        "  git diff | diffscope review --base-url {} --model {}",
-        base_url, model_flag
-    );
+    println!("  git diff | diffscope review --base-url {base_url} --model {model_flag}");
 }
diff --git a/src/commands/doctor/command/probe.rs b/src/commands/doctor/command/probe.rs
index a0106c8..30f1e0b 100644
--- a/src/commands/doctor/command/probe.rs
+++ b/src/commands/doctor/command/probe.rs
@@ -18,7 +18,7 @@ impl EndpointProbe {
 
     pub(super) fn model_flag(&self, model_name: &str) -> String {
         if self.is_ollama() {
-            format!("ollama:{}", model_name)
+            format!("ollama:{model_name}")
         } else {
             model_name.to_string()
        }
@@ -49,7 +49,7 @@ pub(super) async fn probe_endpoint(
 }
 
 async fn probe_ollama_endpoint(client: &Client, base_url: &str) -> Result<Option<Vec<LocalModel>>> {
-    let url = format!("{}/api/tags", base_url);
+    let url = format!("{base_url}/api/tags");
     let response = match client.get(&url).send().await {
         Ok(response) => response,
         Err(_) => return Ok(None),
@@ -65,7 +65,7 @@ async fn probe_ollama_endpoint(client: &Client, base_url: &str) -> Result