Skip to content

Commit cce2eff

Browse files
committed
disable cache when retrying
1 parent 060bc9d commit cce2eff

File tree

3 files changed

+7
-7
lines changed

3 files changed

+7
-7
lines changed

nodes.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -173,11 +173,10 @@ def exec(self, prep_res):
173173
- 5 # path/to/another.js
174174
# ... up to {max_abstraction_num} abstractions
175175
```"""
176-
response = call_llm(prompt, use_cache=use_cache) # Pass use_cache parameter
176+
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
177177

178178
# --- Validation ---
179179
yaml_str = response.strip().split("```yaml")[1].split("```")[0].strip()
180-
# add whitespace to fix llm generation error(except -)
181180
abstractions = yaml.safe_load(yaml_str)
182181

183182
if not isinstance(abstractions, list):
@@ -345,7 +344,7 @@ def exec(self, prep_res):
345344
346345
Now, provide the YAML output:
347346
"""
348-
response = call_llm(prompt, use_cache=use_cache)
347+
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
349348

350349
# --- Validation ---
351350
yaml_str = response.strip().split("```yaml")[1].split("```")[0].strip()
@@ -487,7 +486,7 @@ def exec(self, prep_res):
487486
488487
Now, provide the YAML output:
489488
"""
490-
response = call_llm(prompt)
489+
response = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
491490

492491
# --- Validation ---
493492
yaml_str = response.strip().split("```yaml")[1].split("```")[0].strip()
@@ -616,6 +615,7 @@ def prep(self, shared):
616615
"prev_chapter": prev_chapter, # Add previous chapter info (uses potentially translated name)
617616
"next_chapter": next_chapter, # Add next chapter info (uses potentially translated name)
618617
"language": language, # Add language for multi-language support
618+
"use_cache": use_cache, # Pass use_cache flag
619619
# previous_chapters_summary will be added dynamically in exec
620620
}
621621
)
@@ -638,6 +638,7 @@ def exec(self, item):
638638
chapter_num = item["chapter_num"]
639639
project_name = item.get("project_name")
640640
language = item.get("language", "english")
641+
use_cache = item.get("use_cache", True) # Read use_cache from item
641642
print(f"Writing chapter {chapter_num} for: {abstraction_name} using LLM...")
642643

643644
# Prepare file context string from the map
@@ -722,7 +723,7 @@ def exec(self, item):
722723
723724
Now, directly provide a super beginner-friendly Markdown output (DON'T need ```markdown``` tags):
724725
"""
725-
chapter_content = call_llm(prompt)
726+
chapter_content = call_llm(prompt, use_cache=(use_cache and self.cur_retry == 0)) # Use cache only if enabled and not retrying
726727
# Basic validation/cleanup
727728
actual_heading = f"# Chapter {chapter_num}: {abstraction_name}" # Use potentially translated name
728729
if not chapter_content.strip().startswith(f"# Chapter {chapter_num}"):

utils/call_llm.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
import logging
44
import json
55
from datetime import datetime
6-
import requests
76

87
# Configure logging
98
log_directory = os.getenv("LOG_DIR", "logs")

utils/crawl_github_files.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,7 @@ def fetch_branches(owner: str, repo: str):
156156
return []
157157

158158
if response.status_code != 200:
159-
print(f"Error fetching the branches of {owner}/{path}: {response.status_code} - {response.text}")
159+
print(f"Error fetching the branches of {owner}/{repo}: {response.status_code} - {response.text}")
160160
return []
161161

162162
return response.json()

0 commit comments

Comments (0)