Skip to content

Commit 0e90c1b

Browse files
committed
Add executecode plugin
1 parent a46533e commit 0e90c1b

File tree

4 files changed

+199
-148
lines changed

4 files changed

+199
-148
lines changed
Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
import re
2+
from typing import Tuple, List
3+
import nbformat
4+
from nbconvert.preprocessors import ExecutePreprocessor
5+
import os
6+
import tempfile
7+
import json
8+
9+
# Plugin identifier; presumably used by optillm's plugin loader to route
# "executecode"-approach requests to this module — TODO confirm against loader.
SLUG = "executecode"
10+
11+
def extract_python_code(text: str) -> List[str]:
    """Return the contents of every fenced ```python ... ``` block in *text*.

    Surrounding whitespace inside the fence is trimmed by the pattern itself;
    blocks are returned in the order they appear. An empty list means no
    Python fences were found.
    """
    fence = re.compile(r'```python\s*(.*?)\s*```', re.DOTALL)
    return fence.findall(text)
15+
16+
def execute_code(code: str) -> str:
    """Execute a Python snippet in a fresh Jupyter kernel and return its output.

    Args:
        code: Python source to run as a single notebook code cell.

    Returns:
        The concatenation of all stream output (stdout/stderr) and the
        ``text/plain`` representation of execute results, stripped.

    Raises:
        Propagates nbconvert/jupyter errors unchanged (e.g. a timeout after
        30 seconds, or a CellExecutionError if the snippet raises).
    """
    # Build and execute the notebook entirely in memory. The original wrote
    # it to a NamedTemporaryFile and read it back, which (a) leaked the temp
    # file if nbformat.write raised before the unlink was armed, and (b)
    # fails on Windows, where a NamedTemporaryFile cannot be reopened while
    # still open. ExecutePreprocessor accepts the in-memory NotebookNode.
    nb = nbformat.v4.new_notebook()
    nb['cells'] = [nbformat.v4.new_code_cell(code)]

    ep = ExecutePreprocessor(timeout=30, kernel_name='python3')
    ep.preprocess(nb, {'metadata': {'path': './'}})

    # Collect printed output and final-expression results from every cell.
    output = ""
    for cell in nb.cells:
        if cell.cell_type == 'code' and cell.outputs:
            for output_item in cell.outputs:
                if output_item.output_type == 'stream':
                    output += output_item.text
                elif output_item.output_type == 'execute_result':
                    output += str(output_item.data.get('text/plain', ''))

    return output.strip()
42+
43+
def should_execute_request_code(query: str) -> bool:
    """Heuristic: True when the query appears to ask for code to be run.

    Matches case-insensitively on any of the trigger words
    'run', 'execute', 'output', 'result' anywhere in the query.
    """
    lowered = query.lower()
    return any(word in lowered for word in ('run', 'execute', 'output', 'result'))
47+
48+
def run(system_prompt: str, initial_query: str, client, model: str) -> Tuple[str, int]:
    """Answer *initial_query*, executing embedded or generated Python code.

    If the query contains a ```python``` block and asks for execution
    (see should_execute_request_code), the block is run and its output is
    given to the model as context. Otherwise the model answers first; if
    its answer contains a code block, that code is executed and the model
    is asked for a final response grounded in the real output.

    Args:
        system_prompt: System message passed through to the chat API.
        initial_query: The raw user request, possibly with fenced code.
        client: OpenAI-compatible chat client.
        model: Model name for client.chat.completions.create.

    Returns:
        (response_text, completion_tokens_used) — token count is summed
        across both calls when a follow-up request is made.
    """
    # BUG FIX: the original unpacked extract_python_code(...)[0] — a single
    # string — into (query, request_code), which raises ValueError unless the
    # code block happens to be exactly 2 characters long. The intent was:
    # query is the full request text; request_code is the first fenced block.
    # Also avoids calling extract_python_code twice on the same input.
    code_blocks = extract_python_code(initial_query)
    query = initial_query
    request_code = code_blocks[0] if code_blocks else ""

    if should_execute_request_code(query) and request_code:
        # Execute code supplied in the request and let the model explain it.
        code_output = execute_code(request_code)
        context = f"Query: {query}\nCode:\n```python\n{request_code}\n```\nOutput:\n{code_output}"

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": context}
        ]

        response = client.chat.completions.create(
            model=model,
            messages=messages,
        )

        return response.choices[0].message.content.strip(), response.usage.completion_tokens
    else:
        # Get initial response from the model
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": initial_query}
        ]

        response = client.chat.completions.create(
            model=model,
            messages=messages,
        )

        initial_response = response.choices[0].message.content.strip()
        response_code = extract_python_code(initial_response)

        if response_code:
            # The model produced code: execute it and ask for a grounded
            # final answer that incorporates the actual output.
            code_output = execute_code(response_code[0])
            context = f"Initial response:\n{initial_response}\n\nCode output:\n{code_output}"

            messages.append({"role": "assistant", "content": initial_response})
            messages.append({"role": "user", "content": f"Based on the code execution output, please provide a final response:\n{context}"})

            final_response = client.chat.completions.create(
                model=model,
                messages=messages,
            )

            return final_response.choices[0].message.content.strip(), response.usage.completion_tokens + final_response.usage.completion_tokens
        else:
            return initial_response, response.usage.completion_tokens

optillm/plugins/memory_plugin.py

Lines changed: 42 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ def add(self, item: str):
2020
self.items.append(item)
2121
self.vectors = None # Reset vectors to force recalculation
2222

23-
def get_relevant(self, query: str, n: int = 5) -> List[str]:
23+
def get_relevant(self, query: str, n: int = 10) -> List[str]:
2424
if not self.items:
2525
return []
2626

@@ -49,26 +49,46 @@ def extract_query(text: str) -> Tuple[str, str]:
4949
query = "What is the main point of this text?"
5050
return query, context
5151

52-
def extract_key_information(text: str, client, model: str) -> List[str]:
53-
# print(f"Prompt : {text}")
54-
prompt = f"""Extract key information from the following text. Provide a list of important facts or concepts, each on a new line:
52+
def classify_margin(margin):
    """Return True when a margin note is flagged relevant, i.e. begins "YES#"."""
    return margin.startswith("YES#")
5554

55+
def extract_key_information(system_message, text: str, query: str, client, model: str) -> Tuple[List[str], int]:
    """Ask the model to copy out context from *text* relevant to *query*.

    The model must answer in the form ``YES#<context>`` or ``NO#<reason>``;
    only YES answers are kept (via classify_margin), so the result is a list
    with at most one "margin note".

    Args:
        system_message: System prompt forwarded to the chat API.
        text: The chunk of source text to scan.
        query: The user query the context should be relevant to.
        client: OpenAI-compatible chat client.
        model: Model name for client.chat.completions.create.

    Returns:
        (margins, completion_tokens): margins is [] or a single-element list
        of relevant context; tokens is 0 when the API call fails.
    """
    # Annotation fixed: the function returns a (list, token-count) tuple,
    # not a bare List[str] as originally declared.
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": f"""
'''text
{text}
'''
Copy over all context relevant to the query: {query}
Provide the answer in the format: <YES/NO>#<Relevant context>.
Here are rules:
- If you don't know how to answer the query - start your answer with NO#
- If the text is not related to the query - start your answer with NO#
- If you can extract relevant information - start your answer with YES#
- If the text does not mention the person by name - start your answer with NO#
Example answers:
- YES#Western philosophy originated in Ancient Greece in the 6th century BCE with the pre-Socratics.
- NO#No relevant context.
"""}
    ]

    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=1000
        )
        key_info = response.choices[0].message.content.strip()
    except Exception as e:
        # Best-effort: a failed chunk yields no margins rather than aborting
        # the whole document scan.
        print(f"Error parsing content: {str(e)}")
        return [],0
    margins = []

    # Keep only answers the model flagged as relevant ("YES#...").
    if classify_margin(key_info):
        margins.append(key_info.split("#", 1)[1])

    return margins, response.usage.completion_tokens
7292

7393
def run(system_prompt: str, initial_query: str, client, model: str) -> Tuple[str, int]:
7494
memory = Memory()
@@ -80,7 +100,7 @@ def run(system_prompt: str, initial_query: str, client, model: str) -> Tuple[str
80100
for i in range(0, len(context), chunk_size):
81101
chunk = context[i:i+chunk_size]
82102
# print(f"chunk: {chunk}")
83-
key_info, tokens = extract_key_information(chunk, client, model)
103+
key_info, tokens = extract_key_information(system_prompt, chunk, query, client, model)
84104
#print(f"key info: {key_info}")
85105
completion_tokens += tokens
86106
for info in key_info:
@@ -90,16 +110,22 @@ def run(system_prompt: str, initial_query: str, client, model: str) -> Tuple[str
90110
relevant_info = memory.get_relevant(query)
91111
# print(f"relevant_info : {relevant_info}")
92112
# Generate response using relevant information
93-
prompt = f"""System: {system_prompt}
94-
95-
Context: {' '.join(relevant_info)}
96-
113+
messages = [
114+
{"role": "system", "content": system_prompt},
115+
{"role": "user", "content": f"""
116+
117+
I asked my assistant to read and analyse the above content page by page to help you complete this task. These are margin notes left on each page:
118+
'''text
119+
{relevant_info}
120+
'''
121+
Read again the note(s), take a deep breath and answer the query.
97122
{query}
98-
"""
123+
"""}
124+
]
99125

100126
response = client.chat.completions.create(
101127
model=model,
102-
messages=[{"role": "user", "content": prompt}],
128+
messages=messages,
103129
)
104130
# print(f"response : {response}")
105131
final_response = response.choices[0].message.content.strip()

optillm/plugins/readurls_plugin.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ def extract_urls(text: str) -> List[str]:
2222

2323
return cleaned_urls
2424

25-
def fetch_webpage_content(url: str, max_length: int = 40000) -> str:
25+
def fetch_webpage_content(url: str, max_length: int = 100000) -> str:
2626
try:
2727
headers = {
2828
'User-Agent': 'optillm/0.0.1 (hhttps://github.com/codelion/optillm)'

0 commit comments

Comments
 (0)