from langchain_openai import AzureChatOpenAI
import asyncio
from langchain_core.messages import HumanMessage
# Azure OpenAI chat client used by the streaming demo below.
# NOTE(review): the original passed stream=True together with a raw
# stream_options={"include_usage": True}. With include_usage enabled Azure
# emits extra stream chunks whose `choices` list is empty (usage/content-filter
# chunks), which is the likely source of the error seen on image requests.
# langchain-openai's supported way to get usage metadata on streamed
# responses is `stream_usage=True`, so we use that instead.
llm = AzureChatOpenAI(
    api_key="xxxx",  # TODO: load from AZURE_OPENAI_API_KEY env var, never hard-code secrets
    azure_endpoint="https://xxxxxx.openai.azure.com/",
    api_version="2024-08-01-preview",
    # `openai_api_type="azure"` dropped: it is implied by AzureChatOpenAI.
    azure_deployment="gpt-4o",
    model_name="gpt-4o",
    temperature=0,
    # Replaces stream=True + stream_options={"include_usage": True}.
    stream_usage=True,
)
# Multimodal request: a text question plus an image URL part.
_text_part = {'type': 'text', 'text': "what's the pic describe"}
_image_part = {
    'type': 'image_url',
    'image_url': {
        "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/7/70/Snow_Man.jpg/500px-Snow_Man.jpg"
    },
}
req = [HumanMessage(content=[_text_part, _image_part])]

# Text-only request, used to contrast with the image case.
req_2 = [HumanMessage(content=[{'type': 'text', 'text': 'tell me a joke'}])]
async def fetch_joke(messages=None):
    """Stream a chat response, printing each chunk and the final token usage.

    Despite the name, the default input is the module-level multimodal
    request ``req`` (text + image); pass ``req_2`` for the joke prompt.

    Args:
        messages: optional list of chat messages to send; defaults to ``req``.
    """
    if messages is None:
        messages = req
    async for event in llm.astream_events(messages, version="v2"):
        kind = event["event"]
        if kind == "on_chat_model_end":
            # The final aggregated message carries usage_metadata — only
            # populated when streamed usage reporting is enabled on the model.
            print(f'Token usage: {event["data"]["output"].usage_metadata}\n')
        elif kind == "on_chat_model_stream":
            print(event["data"]["chunk"])


asyncio.run(fetch_joke())
# NOTE: an error occurred when chatting with an image attached,
# but text-only requests worked fine.