Skip to content
Merged
14 changes: 13 additions & 1 deletion .github/workflows/diffscope.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,14 +27,26 @@ jobs:
git fetch origin ${{ github.base_ref }} --depth=1
git diff origin/${{ github.base_ref }}...HEAD > pr.diff

# Decide whether the review can run at all: repos without the
# OPENAI_API_KEY secret (e.g. forks) skip gracefully instead of failing.
- name: Check API key
  id: check_key
  env:
    # Pass the secret through an environment variable instead of
    # interpolating `${{ secrets... }}` into the script text: template
    # expansion happens before the shell runs, so a secret containing
    # quotes, `$(...)`, `;` etc. could break out of the test — or worse,
    # execute as shell. `$OPENAI_API_KEY` is read safely at runtime.
    OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
  run: |
    if [ -z "$OPENAI_API_KEY" ]; then
      echo "skip=true" >> "$GITHUB_OUTPUT"
      echo "::notice::DiffScope review skipped — OPENAI_API_KEY secret not configured"
    else
      echo "skip=false" >> "$GITHUB_OUTPUT"
    fi
Comment on lines +33 to +38

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🔴 GitHub Actions secret directly interpolated into shell command allows injection

At line 33, ${{ secrets.OPENAI_API_KEY }} is expanded by GitHub Actions' template engine before the shell runs, meaning the raw secret value is pasted directly into the if [ -z "..." ] command. If the secret contains shell-special characters (e.g., ", `, $(...), ;, |), it can break the script or, worse, execute arbitrary commands.

Safe pattern using environment variable

The standard fix is to pass the secret through an environment variable:

- name: Check API key
  id: check_key
  env:
    KEY: ${{ secrets.OPENAI_API_KEY }}
  run: |
    if [ -z "$KEY" ]; then
      echo "skip=true" >> "$GITHUB_OUTPUT"
    else
      echo "skip=false" >> "$GITHUB_OUTPUT"
    fi

This way the value is never interpolated into the script text itself.

Suggested change
if [ -z "${{ secrets.OPENAI_API_KEY }}" ]; then
echo "skip=true" >> "$GITHUB_OUTPUT"
echo "::notice::DiffScope review skipped — OPENAI_API_KEY secret not configured"
else
echo "skip=false" >> "$GITHUB_OUTPUT"
fi
if [ -z "$OPENAI_API_KEY" ]; then
echo "skip=true" >> "$GITHUB_OUTPUT"
echo "::notice::DiffScope review skipped — OPENAI_API_KEY secret not configured"
else
echo "skip=false" >> "$GITHUB_OUTPUT"
fi
Note: this suggestion only covers the `run:` script, so it additionally requires adding an `env:` entry to the step — `OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}` — which lies outside the suggested-change range; without it `$OPENAI_API_KEY` is undefined and the step always reports the key as missing.
Open in Devin Review

Was this helpful? React with 👍 or 👎 to provide feedback.


# Run the DiffScope reviewer container against the PR diff produced earlier.
# Skipped when the check_key step determined no API key is configured.
- name: Run DiffScope
  if: steps.check_key.outputs.skip != 'true'
  uses: docker://ghcr.io/haasonsaas/diffscope:latest
  env:
    # Secret is passed as an environment variable to the container,
    # never interpolated into a script.
    OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
  with:
    # Results are written as JSON for the "Post comments" step to consume.
    args: review --diff pr.diff --output-format json --output comments.json

- name: Post comments
if: steps.check_key.outputs.skip != 'true'
uses: actions/github-script@v7
with:
script: |
Expand Down
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

197 changes: 197 additions & 0 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,24 @@ use std::collections::HashMap;
use std::path::{Path, PathBuf};
use tracing::warn;

/// Identifies the role a model plays in the review pipeline.
///
/// Different tasks benefit from different model tiers: cheap/fast models
/// for triage and summarization, frontier models for deep review, and
/// specialised models for reasoning or embeddings.
///
/// Serialized in `snake_case` (e.g. `primary`, `weak`), matching the
/// corresponding `model_*` keys in the config file.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ModelRole {
    /// The main review model (default).
    Primary,
    /// Cheap/fast model for triage, summarization, NL translation.
    Weak,
    /// Reasoning-capable model for complex analysis and self-reflection.
    Reasoning,
    /// Embedding model for RAG indexing.
    Embedding,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderConfig {
#[serde(default)]
Expand All @@ -29,6 +47,22 @@ pub struct Config {
#[serde(default = "default_model")]
pub model: String,

/// Cheap/fast model for triage, summarization, NL translation.
#[serde(default)]
pub model_weak: Option<String>,

/// Reasoning-capable model for complex analysis and self-reflection.
#[serde(default)]
pub model_reasoning: Option<String>,

/// Embedding model for RAG indexing.
#[serde(default)]
pub model_embedding: Option<String>,

/// Fallback models tried in order when the primary model fails.
#[serde(default)]
pub fallback_models: Vec<String>,

#[serde(default = "default_temperature")]
pub temperature: f32,

Expand Down Expand Up @@ -303,6 +337,10 @@ impl Default for Config {
fn default() -> Self {
Self {
model: default_model(),
model_weak: None,
model_reasoning: None,
model_embedding: None,
fallback_models: Vec::new(),
temperature: default_temperature(),
max_tokens: default_max_tokens(),
max_context_chars: default_max_context_chars(),
Expand Down Expand Up @@ -869,6 +907,23 @@ impl Config {
}
}

/// Get the model name for a specific role, falling back to the primary model.
pub fn model_for_role(&self, role: ModelRole) -> &str {
    // Resolve any role-specific override first; `None` means "no override".
    let override_model = match role {
        ModelRole::Primary => None,
        ModelRole::Weak => self.model_weak.as_deref(),
        ModelRole::Reasoning => self.model_reasoning.as_deref(),
        ModelRole::Embedding => self.model_embedding.as_deref(),
    };
    // Every role degrades gracefully to the primary model.
    override_model.unwrap_or(&self.model)
}

/// Build a ModelConfig for a specific role.
///
/// Starts from the primary [`Config::to_model_config`] and swaps in the
/// role-resolved model name.
pub fn to_model_config_for_role(&self, role: ModelRole) -> crate::adapters::llm::ModelConfig {
    let role_model = self.model_for_role(role).to_string();
    let mut model_config = self.to_model_config();
    model_config.model_name = role_model;
    model_config
}

/// Resolve which provider to use based on configuration.
///
/// Returns `(api_key, base_url, adapter)` by checking:
Expand Down Expand Up @@ -1509,4 +1564,146 @@ mod tests {
Some("rust-analyzer")
);
}

/// The `Primary` role always resolves to the main `model` field.
#[test]
fn test_model_role_primary_returns_model() {
    let cfg = Config {
        model: String::from("claude-sonnet-4-6"),
        ..Config::default()
    };
    let resolved = cfg.model_for_role(ModelRole::Primary);
    assert_eq!(resolved, "claude-sonnet-4-6");
}

/// With no weak model configured, `Weak` falls back to the primary model.
#[test]
fn test_model_role_weak_fallback_to_primary() {
    let mut cfg = Config::default();
    cfg.model = "claude-sonnet-4-6".to_string();
    cfg.model_weak = None;
    let resolved = cfg.model_for_role(ModelRole::Weak);
    assert_eq!(resolved, "claude-sonnet-4-6");
}

/// An explicitly configured weak model wins over the primary model.
#[test]
fn test_model_role_weak_explicit() {
    let mut cfg = Config::default();
    cfg.model = "claude-sonnet-4-6".to_string();
    cfg.model_weak = Some("claude-haiku-4-5".to_string());
    assert_eq!(cfg.model_for_role(ModelRole::Weak), "claude-haiku-4-5");
}

/// `Reasoning` falls back to the primary model when unset.
#[test]
fn test_model_role_reasoning_fallback() {
    let mut cfg = Config::default();
    cfg.model = "claude-sonnet-4-6".to_string();
    cfg.model_reasoning = None;
    let resolved = cfg.model_for_role(ModelRole::Reasoning);
    assert_eq!(resolved, "claude-sonnet-4-6");
}

/// An explicitly configured reasoning model wins over the primary model.
#[test]
fn test_model_role_reasoning_explicit() {
    let mut cfg = Config::default();
    cfg.model = "claude-sonnet-4-6".to_string();
    cfg.model_reasoning = Some("claude-opus-4-6".to_string());
    let resolved = cfg.model_for_role(ModelRole::Reasoning);
    assert_eq!(resolved, "claude-opus-4-6");
}

/// No embedding model configured → resolution falls back to `model`.
#[test]
fn test_model_role_embedding_default() {
    let cfg = Config {
        model: String::from("claude-sonnet-4-6"),
        model_embedding: None,
        ..Config::default()
    };
    let resolved = cfg.model_for_role(ModelRole::Embedding);
    assert_eq!(resolved, "claude-sonnet-4-6");
}

/// An explicitly configured embedding model wins over the primary model.
#[test]
fn test_model_role_embedding_explicit() {
    let cfg = Config {
        model: String::from("claude-sonnet-4-6"),
        model_embedding: Some(String::from("custom-embedding-model")),
        ..Config::default()
    };
    let resolved = cfg.model_for_role(ModelRole::Embedding);
    assert_eq!(resolved, "custom-embedding-model");
}

/// `to_model_config_for_role` stamps the role-resolved model name into
/// the generated ModelConfig.
#[test]
fn test_to_model_config_for_role_uses_correct_model() {
    let mut cfg = Config::default();
    cfg.model = "claude-sonnet-4-6".to_string();
    cfg.model_weak = Some("claude-haiku-4-5".to_string());

    // Table-driven: each role maps to the model name its config should carry.
    for (role, expected) in [
        (ModelRole::Primary, "claude-sonnet-4-6"),
        (ModelRole::Weak, "claude-haiku-4-5"),
    ] {
        let model_config = cfg.to_model_config_for_role(role);
        assert_eq!(model_config.model_name, expected);
    }
}

/// A default config starts with no fallback models configured.
#[test]
fn test_fallback_models_default_empty() {
    let fallbacks = &Config::default().fallback_models;
    assert_eq!(fallbacks.len(), 0);
}

/// A config that sets every role field plus fallbacks deserializes with
/// each field populated correctly.
#[test]
fn test_config_deserialization_with_model_roles() {
    let yaml = r#"
model: claude-sonnet-4-6
model_weak: claude-haiku-4-5
model_reasoning: claude-opus-4-6
model_embedding: text-embedding-3-small
fallback_models:
  - gpt-4o
  - claude-sonnet-4-6
"#;
    let config: Config = serde_yaml::from_str(yaml).unwrap();
    assert_eq!(config.model, "claude-sonnet-4-6");
    assert_eq!(config.model_weak.as_deref(), Some("claude-haiku-4-5"));
    assert_eq!(config.model_reasoning.as_deref(), Some("claude-opus-4-6"));
    assert_eq!(
        config.model_embedding.as_deref(),
        Some("text-embedding-3-small")
    );
    // Stronger than the previous length-only check: a wrong or reordered
    // fallback list would have passed `len() == 2`. Verify the entries
    // and their order, since fallbacks are tried in order.
    assert_eq!(config.fallback_models, ["gpt-4o", "claude-sonnet-4-6"]);
}

/// Backward compatibility: legacy configs that predate the role fields
/// must still deserialize, with every new field at its default.
#[test]
fn test_config_deserialization_without_model_roles() {
    let yaml = "model: claude-sonnet-4-6\ntemperature: 0.3\n";
    let config: Config = serde_yaml::from_str(yaml).unwrap();

    assert_eq!(config.model, "claude-sonnet-4-6");
    assert!(config.model_weak.is_none());
    assert!(config.model_reasoning.is_none());
    assert!(config.model_embedding.is_none());
    assert!(config.fallback_models.is_empty());
}
}
39 changes: 36 additions & 3 deletions src/core/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use std::collections::HashSet;
use std::path::Path;
use std::path::PathBuf;

use crate::core::function_chunker::find_enclosing_boundary_line;
use crate::core::SymbolIndex;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LLMContextChunk {
Expand Down Expand Up @@ -50,8 +51,18 @@ impl ContextFetcher {
}
let start = start.max(1);
let end = end.max(start);
let start_idx = start.saturating_sub(1);
let end_idx = end.min(file_lines.len());

// Dynamic context: expand start to enclosing function boundary
let expanded_start = find_enclosing_boundary_line(&content, file_path, start, 10)
.filter(|&boundary| boundary >= start.saturating_sub(10))
.unwrap_or_else(|| start.saturating_sub(5)); // fallback: 5 lines before
let expanded_start = expanded_start.max(1);

// Asymmetric: less context after (1 extra line)
let expanded_end = (end + 1).min(file_lines.len());

let start_idx = expanded_start.saturating_sub(1);
let end_idx = expanded_end.min(file_lines.len());

if start_idx < file_lines.len() {
let chunk_content = truncate_with_notice(
Expand All @@ -62,7 +73,7 @@ impl ContextFetcher {
file_path: file_path.clone(),
content: chunk_content,
context_type: ContextType::FileContent,
line_range: Some((start, end)),
line_range: Some((expanded_start, expanded_end)),
});
}
}
Expand Down Expand Up @@ -330,6 +341,28 @@ mod tests {
let merged = merge_ranges(&ranges);
assert_eq!(merged, vec![(1, 10)]);
}

/// A requested line range inside a function body should be widened so the
/// returned chunk includes the enclosing function's signature line.
#[tokio::test]
async fn test_fetch_context_expands_to_function_boundary() {
    // Lay out a small Rust source file in a temp directory.
    let dir = tempfile::tempdir().unwrap();
    let source = "use std::io;\n\npub fn process(x: i32) -> bool {\n let y = x + 1;\n y > 0\n}\n\npub fn other() {\n println!(\"hi\");\n}\n";
    std::fs::write(dir.path().join("test.rs"), source).unwrap();

    // Lines 4-5 sit inside `process`; the signature is on line 3.
    let fetcher = ContextFetcher::new(dir.path().to_path_buf());
    let chunks = fetcher
        .fetch_context_for_file(&PathBuf::from("test.rs"), &[(4, 5)])
        .await
        .unwrap();

    // Expansion must have pulled the function signature into the chunk.
    assert!(!chunks.is_empty());
    assert!(chunks[0].content.contains("pub fn process"));
}
}

async fn read_file_lossy(path: &Path) -> Result<String> {
Expand Down
Loading
Loading