Real-time response

This commit is contained in:
Isaak Buslovich 2023-11-21 08:03:43 +01:00
parent 27b9d53516
commit 00915e675b

View File

@ -14,7 +14,7 @@ def create_arg_parser():
def get_llama_response(llm, prompt):
    """Return the text of a Llama completion for *prompt*.

    Args:
        llm: A callable Llama model; invoked as
            ``llm(prompt, max_tokens=..., stop=..., echo=...)`` and expected
            to return a dict with a ``'choices'`` list of ``{'text': ...}``.
        prompt: The prompt string sent to the model.

    Returns:
        The generated text, ``"No response generated."`` when the model
        returns no usable choice, or an ``"Error generating response: ..."``
        string if the call raises (errors are reported, not propagated).
    """
    try:
        # echo=True: include the prompt in the returned text (per commit intent).
        # Single invocation only — the previous duplicate call was diff residue.
        output = llm(prompt, max_tokens=60, stop=["Q:", "\n"], echo=True)
        # Defensive lookups: tolerate a missing/empty 'choices' list.
        return output.get('choices', [{}])[0].get('text', "No response generated.")
    except Exception as e:
        # Broad catch is deliberate: this is a user-facing boundary that
        # degrades to an error string rather than crashing the caller.
        return f"Error generating response: {e}"