Skip to content
42 changes: 28 additions & 14 deletions src/adapters/anthropic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,14 +121,18 @@ impl LLMAdapter for AnthropicAdapter {
.content
.first()
.map(|c| {
// Verify it's a text content type
if c.content_type == "text" {
c.text.clone()
Ok(c.text.clone())
} else {
format!("Unsupported content type: {}", c.content_type)
Err(anyhow::anyhow!(
"Unsupported content type: {}",
c.content_type
))
}
})
.unwrap_or_default();
.ok_or_else(|| {
anyhow::anyhow!("Anthropic returned empty content array — no content generated")
})??;

Ok(LLMResponse {
content,
Expand Down Expand Up @@ -292,7 +296,7 @@ mod tests {
}

#[tokio::test]
async fn test_unsupported_content_type() {
async fn test_unsupported_content_type_returns_error() {
let mut server = mockito::Server::new_async().await;
let _mock = server
.mock("POST", "/messages")
Expand All @@ -311,17 +315,20 @@ mod tests {
let adapter = AnthropicAdapter::new(test_config(&server.url())).unwrap();
let result = adapter.complete(test_request()).await;

assert!(result.is_ok());
let response = result.unwrap();
assert!(
response.content.contains("Unsupported content type"),
"Expected 'Unsupported content type' in response, got: {}",
response.content
result.is_err(),
"Unsupported content type should return an error"
);
let err = result.unwrap_err().to_string();
assert!(
err.contains("Unsupported content type"),
"Error should mention unsupported type, got: {}",
err
);
}

#[tokio::test]
async fn test_empty_content_array() {
async fn test_empty_content_array_returns_error() {
let mut server = mockito::Server::new_async().await;
let _mock = server
.mock("POST", "/messages")
Expand All @@ -340,9 +347,16 @@ mod tests {
let adapter = AnthropicAdapter::new(test_config(&server.url())).unwrap();
let result = adapter.complete(test_request()).await;

assert!(result.is_ok());
let response = result.unwrap();
assert_eq!(response.content, "");
assert!(
result.is_err(),
"Empty content array should return an error, not silently succeed"
);
let err = result.unwrap_err().to_string();
assert!(
err.contains("empty content"),
"Error should mention empty content: {}",
err
);
}

#[test]
Expand Down
19 changes: 14 additions & 5 deletions src/adapters/openai.rs
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,9 @@ impl OpenAIAdapter {
.choices
.first()
.map(|c| c.message.content.clone())
.unwrap_or_default();
.ok_or_else(|| {
anyhow::anyhow!("OpenAI returned empty choices array — no content generated")
})?;

Ok(LLMResponse {
content,
Expand Down Expand Up @@ -416,7 +418,7 @@ mod tests {
}

#[tokio::test]
async fn test_empty_choices_returns_empty_content() {
async fn test_empty_choices_returns_error() {
let mut server = mockito::Server::new_async().await;
let _mock = server
.mock("POST", "/chat/completions")
Expand All @@ -435,9 +437,16 @@ mod tests {
let adapter = OpenAIAdapter::new(test_config(&server.url())).unwrap();
let result = adapter.complete(test_request()).await;

assert!(result.is_ok());
let response = result.unwrap();
assert_eq!(response.content, "");
assert!(
result.is_err(),
"Empty choices should return an error, not silently succeed"
);
let err = result.unwrap_err().to_string();
assert!(
err.contains("empty choices"),
"Error should mention empty choices: {}",
err
);
}

#[test]
Expand Down
14 changes: 13 additions & 1 deletion src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1061,7 +1061,7 @@ impl Config {
}

fn default_model() -> String {
"claude-sonnet-4-6".to_string()
"claude-opus-4-6".to_string()
}

fn default_temperature() -> f32 {
Expand Down Expand Up @@ -1764,4 +1764,16 @@ temperature: 0.3
assert!(config.model_embedding.is_none());
assert!(config.fallback_models.is_empty());
}

#[test]
fn test_default_model_is_frontier() {
    // CLAUDE.md mandates frontier models (Opus) as the default for all
    // AI-powered features; this guards against a regression to Sonnet
    // or anything smaller.
    let default = default_model();
    assert!(
        default.contains("opus"),
        "Default model should be Opus (frontier), got: {}",
        default
    );
}
}
54 changes: 54 additions & 0 deletions src/core/comment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -449,12 +449,23 @@ impl CommentSynthesizer {
}

fn deduplicate_comments(comments: &mut Vec<Comment>) {
let severity_rank = |s: &Severity| match s {
Severity::Error => 0,
Severity::Warning => 1,
Severity::Info => 2,
Severity::Suggestion => 3,
};

// Sort by file/line/content, then by severity (highest first)
comments.sort_by(|a, b| {
a.file_path
.cmp(&b.file_path)
.then(a.line_number.cmp(&b.line_number))
.then(a.content.cmp(&b.content))
.then(severity_rank(&a.severity).cmp(&severity_rank(&b.severity)))
});
// dedup_by keeps the first element (b) of consecutive duplicates,
// which is the highest severity due to our sort order
comments.dedup_by(|a, b| {
a.file_path == b.file_path && a.line_number == b.line_number && a.content == b.content
});
Expand Down Expand Up @@ -548,6 +559,49 @@ pub struct RawComment {
mod tests {
use super::*;

#[test]
fn test_deduplicate_preserves_highest_severity() {
    // Regression guard: dedup_by keeps the first element of a consecutive
    // pair and does not look at severity, so if the Warning is sorted ahead
    // of the Error for the same file/line/content, the Error would be lost.
    let make_raw = |severity: Severity, confidence: f32| RawComment {
        file_path: PathBuf::from("src/lib.rs"),
        line_number: 10,
        content: "Missing null check".to_string(),
        rule_id: None,
        suggestion: None,
        severity: Some(severity),
        category: Some(Category::Bug),
        confidence: Some(confidence),
        fix_effort: None,
        tags: Vec::new(),
        code_suggestion: None,
    };
    // Lower severity first, so only a severity-aware sort keeps the Error.
    let raw_comments = vec![
        make_raw(Severity::Warning, 0.8),
        make_raw(Severity::Error, 0.9),
    ];

    let comments = CommentSynthesizer::synthesize(raw_comments).unwrap();
    assert_eq!(comments.len(), 1, "Should deduplicate to one comment");
    assert_eq!(
        comments[0].severity,
        Severity::Error,
        "Should keep the higher severity (Error), not the lower (Warning)"
    );
}

#[test]
fn test_severity_display() {
assert_eq!(Severity::Error.to_string(), "Error");
Expand Down
31 changes: 31 additions & 0 deletions src/core/composable_pipeline.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,12 +211,21 @@ impl PipelineStage for DeduplicateStage {
}

fn execute(&self, ctx: &mut PipelineContext) -> Result<()> {
use crate::core::comment::Severity;
let severity_rank = |s: &Severity| match s {
Severity::Error => 0,
Severity::Warning => 1,
Severity::Info => 2,
Severity::Suggestion => 3,
};
ctx.comments.sort_by(|a, b| {
a.file_path
.cmp(&b.file_path)
.then(a.line_number.cmp(&b.line_number))
.then(a.content.cmp(&b.content))
.then(severity_rank(&a.severity).cmp(&severity_rank(&b.severity)))
});
// dedup_by keeps b (the earlier element); our sort puts highest severity first
ctx.comments.dedup_by(|a, b| {
a.file_path == b.file_path && a.line_number == b.line_number && a.content == b.content
});
Expand Down Expand Up @@ -688,6 +697,28 @@ mod tests {
assert_eq!(ctx.comments.len(), 2);
}

// Regression: DeduplicateStage must keep the highest severity when deduplicating
#[test]
fn test_deduplicate_preserves_highest_severity() {
    let mut ctx = PipelineContext::new();
    // Push the lower severity first so the stage's severity-aware sort
    // must reorder the pair before deduplication.
    for severity in [Severity::Info, Severity::Error] {
        let mut comment = make_comment("test.rs", 10, "duplicate issue", 0.7);
        comment.severity = severity;
        ctx.comments.push(comment);
    }

    DeduplicateStage.execute(&mut ctx).unwrap();

    assert_eq!(ctx.comments.len(), 1);
    assert_eq!(
        ctx.comments[0].severity,
        Severity::Error,
        "DeduplicateStage should keep the highest severity"
    );
}

// Regression: SortBySeverityStage must sort Error > Warning > Info
#[test]
fn test_sort_by_severity_order() {
Expand Down
2 changes: 1 addition & 1 deletion src/core/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ pub struct LLMContextChunk {
pub line_range: Option<(usize, usize)>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ContextType {
FileContent,
Definition,
Expand Down
83 changes: 71 additions & 12 deletions src/core/diff_parser.rs
Original file line number Diff line number Diff line change
Expand Up @@ -181,8 +181,8 @@ impl DiffParser {
new_content: Some(new_content.to_string()),
hunks,
is_binary: false,
is_deleted: false,
is_new: false,
is_deleted: new_content.is_empty() && !old_content.is_empty(),
is_new: old_content.is_empty() && !new_content.is_empty(),
})
}

Expand Down Expand Up @@ -368,16 +368,17 @@ impl DiffParser {
*i += 1;
continue;
}
if line.is_empty() {
*i += 1;
continue;
}

let (change_type, content) = match line.chars().next() {
Some('+') => (ChangeType::Added, &line[1..]),
Some('-') => (ChangeType::Removed, &line[1..]),
Some(' ') => (ChangeType::Context, &line[1..]),
_ => (ChangeType::Context, line),
let (change_type, content) = if line.is_empty() {
// Some diff tools emit truly empty lines for empty context lines
// (omitting the leading space). Treat as context to keep line numbers in sync.
(ChangeType::Context, "")
} else {
match line.chars().next() {
Some('+') => (ChangeType::Added, &line[1..]),
Some('-') => (ChangeType::Removed, &line[1..]),
Some(' ') => (ChangeType::Context, &line[1..]),
_ => (ChangeType::Context, line),
}
};

let diff_line = match change_type {
Expand Down Expand Up @@ -540,4 +541,62 @@ index 0000000..f735c20\n\
assert!(diffs[0].is_new);
assert!(!diffs[0].is_deleted);
}

#[test]
fn test_parse_hunk_empty_lines_not_skipped() {
    // Regression guard: the parser used to skip empty lines in the diff
    // body. Some diff tools emit a truly empty line for an empty context
    // line (omitting the leading space); dropping it shifts every
    // subsequent line number.
    let diff_text = "\
diff --git a/test.txt b/test.txt\n\
index abc..def 100644\n\
--- a/test.txt\n\
+++ b/test.txt\n\
@@ -1,4 +1,4 @@\n\
line1\n\
\n\
-old_line3\n\
+new_line3\n";

    let diffs = DiffParser::parse_unified_diff(diff_text).unwrap();
    assert_eq!(diffs.len(), 1);

    // The empty line between "line1" and "-old_line3" must count as
    // context line 2, which places the removal on old line 3.
    let removal = diffs[0].hunks[0]
        .changes
        .iter()
        .find(|change| change.change_type == ChangeType::Removed)
        .expect("Should have a removed line");
    assert_eq!(
        removal.old_line_no,
        Some(3),
        "Removed line should be on old line 3 (after the empty context line 2), got {:?}",
        removal.old_line_no
    );
}

#[test]
fn test_parse_text_diff_sets_is_new_for_new_file() {
    // Regression guard: a text diff built from empty old content is a file
    // creation and must carry the is_new flag.
    let parsed =
        DiffParser::parse_text_diff("", "new content\n", PathBuf::from("new_file.rs")).unwrap();
    assert!(
        parsed.is_new,
        "parse_text_diff with empty old content should set is_new=true"
    );
}

#[test]
fn test_parse_text_diff_sets_is_deleted_for_deleted_file() {
    // Regression guard: a text diff ending in empty new content is a file
    // deletion and must carry the is_deleted flag.
    let parsed = DiffParser::parse_text_diff("old content\n", "", PathBuf::from("deleted_file.rs"))
        .unwrap();
    assert!(
        parsed.is_deleted,
        "parse_text_diff with empty new content should set is_deleted=true"
    );
}
}
Loading
Loading