Skip to content

Commit 3ff9073

Browse files
committed
Use the token_limit param
Signed-off-by: Michael Yuan <[email protected]>
1 parent de8a7ef commit 3ff9073

File tree

2 files changed

+12
-9
lines changed

2 files changed

+12
-9
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ And the LLM API service you want to use to review the PRs.
7171

7272
* `llm_api_endpoint` : The OpenAI compatible API service endpoint for the LLM to conduct code reviews.
7373
* `llm_model_name` : The model name required by the API service.
74+
* `llm_ctx_size` : The context window size of the selected model.
7475
* `llm_api_key` : Optional: The API key if required by the LLM service provider.
7576

7677
Click on the **Build** button.

src/github-pr-summary.rs

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ use dotenv::dotenv;
22
use flowsnet_platform_sdk::logger;
33
use github_flows::{
44
event_handler, get_octo, listen_to_event,
5-
// octocrab::models::{events::payload::EventPayload, reactions::ReactionContent},
65
octocrab::models::events::payload::{EventPayload, IssueCommentEventAction, PullRequestEventAction},
76
octocrab::models::CommentId,
87
GithubLogin,
@@ -13,10 +12,6 @@ use llmservice_flows::{
1312
};
1413
use std::env;
1514

16-
// The soft character limit of the input context size
17-
// The codestral has a context length of 16k tokens, and we allow 16k chars of context here
18-
static CHAR_SOFT_LIMIT : usize = 16384;
19-
2015
#[no_mangle]
2116
#[tokio::main(flavor = "current_thread")]
2217
pub async fn on_deploy() {
@@ -41,8 +36,13 @@ async fn handler(payload: EventPayload) {
4136
let trigger_phrase = env::var("trigger_phrase").unwrap_or("flows summarize".to_string());
4237
let llm_api_endpoint = env::var("llm_api_endpoint").unwrap_or("https://api.openai.com/v1".to_string());
4338
let llm_model_name = env::var("llm_model_name").unwrap_or("gpt-4o".to_string());
39+
let llm_ctx_size = env::var("llm_ctx_size").unwrap_or("16384".to_string()).parse::<u32>().unwrap_or(0);
4440
let llm_api_key = env::var("llm_api_key").unwrap_or("LLAMAEDGE".to_string());
4541

42+
// The soft character limit of the input context size
43+
// This is measured in chars. We set it to be 2x llm_ctx_size, which is measured in tokens.
44+
let ctx_size_char : usize = (2 * llm_ctx_size).try_into().unwrap_or(0);
45+
4646
let mut new_commit : bool = false;
4747
let (title, pull_number, _contributor) = match payload {
4848
EventPayload::PullRequestEvent(e) => {
@@ -137,8 +137,8 @@ async fn handler(payload: EventPayload) {
137137
// Start a new commit
138138
current_commit.clear();
139139
}
140-
// Append the line to the current commit if the current commit is less than CHAR_SOFT_LIMIT
141-
if current_commit.len() < CHAR_SOFT_LIMIT {
140+
// Append the line to the current commit if the current commit is less than ctx_size_char
141+
if current_commit.len() < ctx_size_char {
142142
current_commit.push_str(line);
143143
current_commit.push('\n');
144144
}
@@ -166,14 +166,15 @@ async fn handler(payload: EventPayload) {
166166
log::debug!("Sending patch to LLM: {}", commit_hash);
167167
let co = ChatOptions {
168168
model: Some(&llm_model_name),
169+
token_limit: llm_ctx_size,
169170
restart: true,
170171
system_prompt: Some(system),
171172
..Default::default()
172173
};
173-
let question = "The following is a GitHub patch. Please summarize the key changes in concise points. Start with the most important findings.\n\n".to_string() + truncate(commit, CHAR_SOFT_LIMIT);
174+
let question = "The following is a GitHub patch. Please summarize the key changes in concise points. Start with the most important findings.\n\n".to_string() + truncate(commit, ctx_size_char);
174175
match lf.chat_completion(&chat_id, &question, &co).await {
175176
Ok(r) => {
176-
if reviews_text.len() < CHAR_SOFT_LIMIT {
177+
if reviews_text.len() < ctx_size_char {
177178
reviews_text.push_str("------\n");
178179
reviews_text.push_str(&r.choice);
179180
reviews_text.push_str("\n");
@@ -197,6 +198,7 @@ async fn handler(payload: EventPayload) {
197198
log::debug!("Sending all reviews to LLM for summarization");
198199
let co = ChatOptions {
199200
model: Some(&llm_model_name),
201+
token_limit: llm_ctx_size,
200202
restart: true,
201203
system_prompt: Some(system),
202204
..Default::default()

0 commit comments

Comments
 (0)