@@ -53,7 +53,7 @@ async def get_next_action(model, messages, objective, session_id):
     if model == "claude-3":
         operation = await call_claude_3_with_ocr(messages, objective, model)
         return operation, None
-    operation = call_ollama_llava(model, messages)
+    operation = call_ollama(model, messages)
     return operation, None
 
 def call_gpt_4o(messages):
@@ -554,9 +554,9 @@ async def call_gpt_4o_labeled(messages, objective, model):
             traceback.print_exc()
         return call_gpt_4o(messages)
 
-def call_ollama_llava(model, messages):
+def call_ollama(model, messages):
     if config.verbose:
-        print(f"[call_ollama_llava] model {model}")
+        print(f"[call_ollama] model {model}")
     time.sleep(1)
     try:
         model = config.initialize_ollama()
@@ -575,7 +575,7 @@ def call_ollama_llava(model, messages):
 
         if config.verbose:
             print(
-                "[call_ollama_llava] user_prompt",
+                "[call_ollama] user_prompt",
                 user_prompt,
             )
 
@@ -603,7 +603,7 @@ def call_ollama_llava(model, messages):
         assistant_message = {"role": "assistant", "content": content}
         if config.verbose:
             print(
-                "[call_ollama_llava] content",
+                "[call_ollama] content",
                 content,
             )
         content = json.loads(content)
@@ -629,7 +629,7 @@ def call_ollama_llava(model, messages):
         )
         if config.verbose:
             traceback.print_exc()
-        return call_ollama_llava(model, messages)
+        return call_ollama(model, messages)
 
 
 async def call_claude_3_with_ocr(messages, objective, model):