From 00915e675ba17789c83840d1f79d69e94f5e4449 Mon Sep 17 00:00:00 2001
From: Isaak Buslovich
Date: Tue, 21 Nov 2023 08:03:43 +0100
Subject: [PATCH] Real-time response

---
 main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.py b/main.py
index ec0a51c..16634cb 100644
--- a/main.py
+++ b/main.py
@@ -14,7 +14,7 @@ def create_arg_parser():
 def get_llama_response(llm, prompt):
     """Get response from Llama model."""
     try:
-        output = llm(prompt, max_tokens=60, stop=["Q:", "\n"], echo=False)
+        output = llm(prompt, max_tokens=60, stop=["Q:", "\n"], echo=True)
         return output.get('choices', [{}])[0].get('text', "No response generated.")
     except Exception as e:
         return f"Error generating response: {e}"