Merge pull request #206 from gitautoai/wes
Fix: write_pr_body encountered a BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 128000 tokens. However, your messages resulted in 142586 tokens. Please reduce the length of the messages.", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}
hiroshinishio authored Jul 8, 2024
2 parents cb24b1a + db8762c commit 02b39b8
Showing 5 changed files with 52 additions and 33 deletions.
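The failure is easy to reproduce outside the app: tokenize the prompt with tiktoken and compare the count against gpt-4o's 128,000-token window. A minimal sketch, not part of this commit; the oversized payload is a stand-in for the real PR input.

import tiktoken

MAX_CONTEXT_TOKENS = 128000  # mirrors OPENAI_MAX_CONTEXT_TOKENS in config.py below

encoding = tiktoken.encoding_for_model("gpt-4o")
payload = "diff --git a/app.py b/app.py\n" * 25000  # hypothetical oversized input
token_count = len(encoding.encode(payload))
print(token_count, token_count > MAX_CONTEXT_TOKENS)  # e.g. ~250000, True
# Any count above 128000 is what the API rejects with the 400
# context_length_exceeded error quoted in the commit message above.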
1 change: 1 addition & 0 deletions config.py
@@ -36,6 +36,7 @@ def get_env_var(name: str) -> str:
 OPENAI_FINAL_STATUSES: list[str] = ["cancelled", "completed", "expired", "failed"]
 OPENAI_MAX_ARRAY_LENGTH = 32 # https://community.openai.com/t/assistant-threads-create-400-messages-array-too-long/754574/1
 OPENAI_MAX_STRING_LENGTH = 256000 # https://community.openai.com/t/assistant-threads-create-400-messages-array-too-long/754574/5
+OPENAI_MAX_CONTEXT_TOKENS = 128000 # https://gitauto-ai.sentry.io/issues/5582421515/events/9a09416e714c4a66bf1bd86916702be2/?project=4506865231200256&referrer=issue_details.related_trace_issue
 OPENAI_MAX_TOKENS = 4096
 OPENAI_MODEL_ID = "gpt-4o"
 OPENAI_ORG_ID: str = get_env_var(name="OPENAI_ORG_ID")
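One caveat the new constant glosses over: the 128,000-token window is shared by the system instruction, the user message, and the completion (OPENAI_MAX_TOKENS = 4096). As a hedged aside, not what this commit implements, a stricter cap would reserve that headroom; the 1,024-token system-prompt allowance below is invented.

OPENAI_MAX_CONTEXT_TOKENS = 128000
OPENAI_MAX_TOKENS = 4096  # completion budget, from config.py
SYSTEM_PROMPT_ALLOWANCE = 1024  # hypothetical room for the system message
# Truncating the user message to this budget instead of the full window
# keeps the whole request inside the model's context.
PROMPT_TOKEN_BUDGET = (
    OPENAI_MAX_CONTEXT_TOKENS - OPENAI_MAX_TOKENS - SYSTEM_PROMPT_ALLOWANCE
)  # 122880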
63 changes: 32 additions & 31 deletions requirements.txt
@@ -1,61 +1,61 @@
 annotated-types==0.7.0
 anyio==4.4.0
-astroid==3.2.2 # required by pylint
+astroid==3.2.2
 black==24.4.2
 certifi==2024.6.2
 cffi==1.16.0
-cfgv==3.4.0 # required by pre-commit
+cfgv==3.4.0
 charset-normalizer==3.3.2
 click==8.1.7
 colorama==0.4.6
 cryptography==42.0.8
 deprecation==2.1.0
-dill==0.3.8 # required by pylint
-distlib==0.3.8 # required by virtualenv
+dill==0.3.8
+distlib==0.3.8
 distro==1.9.0
-dnspython==2.6.1 # required by email-validator
-email_validator==2.2.0 # required by fastapi
+dnspython==2.6.1
+email_validator==2.2.0
 exceptiongroup==1.2.1
 fastapi==0.111.0
-fastapi-cli==0.0.4 # required by fastapi
-filelock==3.15.4 # required by virtualenv
+fastapi-cli==0.0.4
+filelock==3.15.4
 gotrue==2.5.4
 h11==0.14.0
 httpcore==1.0.5
 httpx==0.27.0
-identify==2.5.36 # required by pre-commit
+identify==2.5.36
 idna==3.7
-isort==5.13.2 # required by pylint
-Jinja2==3.1.4 # required by fastapi
+isort==5.13.2
+Jinja2==3.1.4
 mangum==0.17.0
-markdown-it-py==3.0.0 # required by rich
-MarkupSafe==2.1.5 # required by Jinja2
-mccabe==0.7.0 # required by pylint
-mdurl==0.1.2 # required by markdown-it-py
-mypy-extensions==1.0.0 # required by black
-nodeenv==1.9.1 # required by pre-commit
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+mccabe==0.7.0
+mdurl==0.1.2
+mypy-extensions==1.0.0
+nodeenv==1.9.1
 openai==1.35.7
-orjson==3.10.5 # required by fastapi
+orjson==3.10.5
 packaging==24.1
-pathspec==0.12.1 # required by black
-platformdirs==4.2.2 # required by pylint
+pathspec==0.12.1
+platformdirs==4.2.2
 postgrest==0.16.8
 pre-commit==3.7.1
 pycparser==2.22
-pydantic==2.8.0 # required by fastapi, gotrue, openai, postgrest
-pydantic_core==2.20.0 # required by pydantic
-Pygments==2.18.0 # required by rich
+pydantic==2.8.0
+pydantic_core==2.20.0
+Pygments==2.18.0
 PyJWT==2.8.0
 pylint==3.2.5
 python-dateutil==2.9.0.post0
 python-dotenv==1.0.1
-python-multipart==0.0.9 # required by fastapi
-PyYAML==6.0.1 # required by pre-commit
+python-multipart==0.0.9
+PyYAML==6.0.1
 realtime==1.0.6
 requests==2.32.3
-rich==13.7.1 # required by typer
+rich==13.7.1
 sentry-sdk==2.7.1
-shellingham==1.5.4 # required by typer
+shellingham==1.5.4
 six==1.16.0
 sniffio==1.3.1
 starlette==0.37.2
@@ -64,12 +64,13 @@ StrEnum==0.4.15
 stripe==10.1.0
 supabase==2.5.1
 supafunc==0.4.6
-tomlkit==0.12.5 # required by pylint
+tiktoken==0.7.0
+tomlkit==0.12.5
 tqdm==4.66.4
-typer==0.12.3 # required by fastapi-cli
+typer==0.12.3
 typing_extensions==4.12.2
-ujson==5.10.0 # required by fastapi
+ujson==5.10.0
 urllib3==2.2.2
 uvicorn==0.30.1
-virtualenv==20.26.3 # required by pre-commit
+virtualenv==20.26.3
 websockets==12.0
2 changes: 1 addition & 1 deletion services/openai/agent.py
@@ -255,7 +255,7 @@ def submit_message(

 def wait_on_run(run: Run, thread: Thread, token: str, run_name: str) -> tuple[Run, str]:
     """https://cookbook.openai.com/examples/assistants_api_overview_python"""
-    print(f"Run `{run_name}` status before loop: { run.status}")
+    print(f"Run `{run_name}` status before loop: {run.status}")
     client: OpenAI = create_openai_client()
     input_data = ""
     while run.status not in OPENAI_FINAL_STATUSES:
4 changes: 3 additions & 1 deletion services/openai/chat.py
@@ -6,17 +6,19 @@
 from config import OPENAI_MODEL_ID, OPENAI_TEMPERATURE
 from services.openai.init import create_openai_client
 from services.openai.instructions import SYSTEM_INSTRUCTION_FOR_WRITING_PR
+from services.openai.truncate import truncate_message
 from utils.handle_exceptions import handle_exceptions


 @handle_exceptions(raise_on_error=True)
 def write_pr_body(input_message: str) -> str:
     """https://platform.openai.com/docs/api-reference/chat/create"""
     client: OpenAI = create_openai_client()
+    truncated_msg: str = truncate_message(input_message=input_message)
     completion: ChatCompletion = client.chat.completions.create(
         messages=[
             {"role": "system", "content": SYSTEM_INSTRUCTION_FOR_WRITING_PR},
-            {"role": "user", "content": input_message},
+            {"role": "user", "content": truncated_msg if truncated_msg else input_message},
         ],
         model=OPENAI_MODEL_ID,
         n=1,
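The ternary reads oddly until truncate_message's contract is spelled out: as reconstructed in the truncate.py hunk below, it returns the shortened text only when the input was over the limit, None when nothing needed cutting, and "" when @handle_exceptions swallows an error. A small sketch of that fallback; the helper name and example strings are invented.

def pick_user_content(input_message: str, truncated_msg: str | None) -> str:
    # None ("nothing to cut") and "" (error default) are both falsy,
    # so the original message is sent unchanged in those cases.
    return truncated_msg if truncated_msg else input_message

assert pick_user_content("short report", None) == "short report"
assert pick_user_content("short report", "") == "short report"
assert pick_user_content("very long report", "very long") == "very long"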
15 changes: 15 additions & 0 deletions services/openai/truncate.py
@@ -0,0 +1,15 @@
+import tiktoken
+from config import OPENAI_MAX_CONTEXT_TOKENS, OPENAI_MODEL_ID
+from utils.handle_exceptions import handle_exceptions
+
+
+@handle_exceptions(default_return_value="", raise_on_error=False)
+def truncate_message(input_message: str) -> str:
+    encoding: tiktoken.Encoding = tiktoken.encoding_for_model(
+        model_name=OPENAI_MODEL_ID
+    )
+    tokens: list[int] = encoding.encode(text=input_message)
+    if len(tokens) > OPENAI_MAX_CONTEXT_TOKENS:
+        tokens = tokens[:OPENAI_MAX_CONTEXT_TOKENS]
+        truncated_message: str = encoding.decode(tokens=tokens)
+        return truncated_message
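A usage sketch under the same reconstruction, assuming the repo's environment variables are set so config.py imports cleanly; both inputs are invented. Inputs at or under the limit fall through the if and yield None; oversized ones come back cut to the first 128,000 tokens.

from services.openai.truncate import truncate_message

assert truncate_message(input_message="tiny diff") is None  # under the limit
huge = "one line of a very long diff\n" * 100_000  # roughly 700k tokens
result = truncate_message(input_message=huge)
assert result is not None and len(result) < len(huge)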
