chore: Update environment variables in mini_llm.py and env.template
Use SLOW_MODEL (instead of MODEL_NAME) for the DSL generation.
kanak8278 committed Jul 26, 2024
1 parent 0b2ff45 commit 394a894
Showing 2 changed files with 8 additions and 5 deletions.
env.template: 5 changes (4 additions, 1 deletion)
@@ -1,5 +1,8 @@
 OPENAI_API_KEY=
-MODEL_NAME=
+FAST_MODEL=
+# Internally we use SLOW_MODEL for the DSL generation
+SLOW_MODEL=
+

 #AZURE_OPENAI_API_KEY=
 #AZURE_OPENAI_ENDPOINT=
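For reference, a filled-in copy of the template could look like the sketch below. The key and model names are placeholders chosen for illustration; the commit itself does not prescribe any particular values.

# Example values (placeholders, not prescribed by this commit)
OPENAI_API_KEY=sk-...
FAST_MODEL=gpt-4o-mini
# Internally we use SLOW_MODEL for the DSL generation
SLOW_MODEL=gpt-4-turbo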
nl2dsl/utils/mini_llm.py: 8 changes (4 additions, 4 deletions)
@@ -15,10 +15,10 @@
 else:
     client = OpenAI()

-GPT_MODEL = os.getenv('MODEL_NAME')
+SLOW_MODEL = os.getenv('SLOW_MODEL')

-if GPT_MODEL is None:
-    raise EnvironmentError("MODEL_NAME environment variable is not set.")
+if SLOW_MODEL is None:
+    raise EnvironmentError("SLOW_MODEL environment variable is not set.")

 @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(10))
 def mini_llm(utterance, model="gpt-4-turbo", temperature=0.3):
@@ -35,7 +35,7 @@ def mini_llm(utterance, model="gpt-4-turbo", temperature=0.3):

 @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(10))
 def chat_completion_request(
-    messages, tools=None, tool_choice=None, model=GPT_MODEL, response_format=None
+    messages, tools=None, tool_choice=None, model=SLOW_MODEL, response_format=None
 ):
     response = client.chat.completions.create(
         model=model,
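A minimal usage sketch follows, assuming the module path shown in the diff (nl2dsl/utils/mini_llm.py) and that chat_completion_request returns the raw OpenAI response object; the tail of the function is truncated in this diff, so that return value, the placeholder key, and the model name are assumptions, not part of the commit.

# Usage sketch (assumptions: placeholder key and model name, and that
# chat_completion_request returns the OpenAI ChatCompletion object).
import os

# Both variables are read when the module is imported, so set them first.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")    # placeholder key
os.environ.setdefault("SLOW_MODEL", "gpt-4-turbo")   # placeholder model name

from nl2dsl.utils.mini_llm import chat_completion_request

messages = [
    {"role": "user", "content": "Turn this instruction into the DSL."},
]

# With no explicit `model` argument, the request now defaults to SLOW_MODEL.
response = chat_completion_request(messages)
print(response.choices[0].message.content)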
