Hi,
I am trying to connect to Azure OpenAI using the LangChain framework. When I use the gpt-35-turbo-16k model, I get the following error:

openai.error.InvalidRequestError: The completion operation does not work with the specified model, gpt-35-turbo-16k. Please choose different model and try again. You can learn more about which models can be used with each operation here: https://go.microsoft.com/fwlink/?linkid=2197993.
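For reference, here is a minimal sketch of the kind of setup that produces this error. The endpoint, key, and API version values below are placeholders, not my exact configuration; the `AzureOpenAI` completion wrapper and the `print(llm("Tell me joke"))` call match the traceback further down.

```python
import os
from langchain.llms import AzureOpenAI  # completion-style LLM wrapper

# Placeholder Azure OpenAI settings - real values are set in my environment
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
os.environ["OPENAI_API_BASE"] = "https://<my-resource>.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "<my-key>"

# Deployment backed by the gpt-35-turbo-16k model
llm = AzureOpenAI(deployment_name="gpt-35-turbo-16k")

print(llm("Tell me joke"))  # this line raises the InvalidRequestError below
```

Full traceback: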
```
Traceback (most recent call last):
  File "D:\Corent\AI\LangChain\azure\azure_connection.py", line 14, in <module>
    print(llm("Tell me joke"))
          ^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\base.py", line 802, in __call__
    self.generate(
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\base.py", line 598, in generate
    output = self._generate_helper(
             ^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\base.py", line 504, in _generate_helper
    raise e
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\base.py", line 491, in _generate_helper
    self._generate(
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\openai.py", line 384, in _generate
    response = completion_with_retry(
               ^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\openai.py", line 116, in completion_with_retry
    return _completion_with_retry(**kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\tenacity\__init__.py", line 289, in wrapped_f
    return self(f, *args, **kw)
           ^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\tenacity\__init__.py", line 379, in __call__
    do = self.iter(retry_state=retry_state)
         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\tenacity\__init__.py", line 314, in iter
    return fut.result()
           ^^^^^^^^^^^^
  File "C:\Users\donbosco\AppData\Local\Programs\Python\Python311\Lib\concurrent\futures\_base.py", line 449, in result
    return self.__get_result()
           ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\donbosco\AppData\Local\Programs\Python\Python311\Lib\concurrent\futures\_base.py", line 401, in __get_result
    raise self._exception
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\tenacity\__init__.py", line 382, in __call__
    result = fn(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\langchain\llms\openai.py", line 114, in _completion_with_retry
    return llm.client.create(**kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\openai\api_resources\completion.py", line 25, in create
    return super().create(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
    response, _, api_key = requestor.request(
                           ^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\openai\api_requestor.py", line 298, in request
    resp, got_stream = self._interpret_response(result, stream)
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\openai\api_requestor.py", line 700, in _interpret_response
    self._interpret_response_line(
  File "D:\Corent\AI\LangChain\azure\venv\Lib\site-packages\openai\api_requestor.py", line 763, in _interpret_response_line
    raise self.handle_error_response(
openai.error.InvalidRequestError: The completion operation does not work with the specified model, gpt-35-turbo-16k. Please choose different model and try again. You can learn more about which models can be used with each operation here: https://go.microsoft.com/fwlink/?linkid=2197993.
```