Hi Gabriel Henrique Medeiros Santos
I referred to the code available here as a reference to replicate the issue.
I used the GPT-4o-mini model in Brazil South as part of a trial, and tested both API version "2024-08-01-preview" and API version "2025-04-01-preview".
I was able to view the sine-wave image at the end.
Suggestions based on my trials:
Please make sure that you are using one of the supported models, as mentioned here.
Please test with API version "2024-08-01-preview" to see whether the issue is specific to the API version.
Please share deployment name and image link in private message if the issue persists.
Please test with another available region.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Azure OpenAI Assistants API (Python SDK) – Minimal, clean, end-to-end example.
Flow:
1) Create assistant (with Code Interpreter tool)
2) Create thread
3) Add a user message
4) Create & poll a run until terminal state
5) List thread messages, print assistant text, and save any image output
Requirements:
- pip install openai
- Environment variables:
AZURE_OPENAI_API_KEY -> your Azure OpenAI key
AZURE_OPENAI_ENDPOINT -> e.g. https://<resource>.openai.azure.com
- Replace `MODEL_DEPLOYMENT_NAME` with your deployed model name (e.g., "gpt-4o-mini")
Notes:
- Messages list is returned newest-first; assistant reply is typically at index 0.
- Code Interpreter outputs can include files (images). This sample saves the first image.
"""
import os
import json
import time
from typing import Optional
from openai import AzureOpenAI
from PIL import Image # pillow is optional, used here to open image after download
# ---------- Configuration ----------
# Credentials come from the environment so no secrets live in source control.
API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
# Assistants API requires a preview api-version; pick one your resource supports.
API_VERSION = "2025-04-01-preview"  # use the Preview that matches your resource
# NOTE: this must be the Azure *deployment* name, not the model family name.
MODEL_DEPLOYMENT_NAME = "gpt-4o-mini"  # <-- change to your deployment name

# Fail fast at import time with a clear message if configuration is missing.
if not API_KEY or not AZURE_ENDPOINT:
    raise RuntimeError("Please set AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT env vars.")

# Single client instance shared by every helper function below.
client = AzureOpenAI(
    api_key=API_KEY,
    api_version=API_VERSION,
    azure_endpoint=AZURE_ENDPOINT,
)
# ---------- 1) Create an Assistant ----------
def create_assistant() -> str:
    """Create an assistant equipped with the Code Interpreter tool.

    Returns:
        The id of the newly created assistant.
    """
    print("[*] Creating assistant with Code Interpreter…")
    instructions = (
        "You are a helpful AI assistant who makes interesting visualizations based on data. "
        "You have access to a sandboxed environment for writing and testing code. "
        "When you are asked to create a visualization you should follow these steps:\n"
        "1. Write the code.\n"
        "2. Anytime you write new code display a preview of the code to show your work.\n"
        "3. Run the code to confirm that it runs.\n"
        "4. If the code is successful display the visualization.\n"
        "5. If the code is unsuccessful display the error message and try to revise the code and rerun."
    )
    # `model` must be your Azure deployment name, not a plain model family.
    created = client.beta.assistants.create(
        name="Data Visualization",
        instructions=instructions,
        tools=[{"type": "code_interpreter"}],
        model=MODEL_DEPLOYMENT_NAME,
    )
    print(f"[+] Assistant created: {created.id}")
    return created.id
# ---------- 2) Create a Thread ----------
def create_thread() -> str:
    """Start a fresh, empty conversation thread and return its id."""
    print("[*] Creating thread…")
    new_thread = client.beta.threads.create()
    thread_id = new_thread.id
    print(f"[+] Thread created: {thread_id}")
    return thread_id
# ---------- 3) Add a user message ----------
def add_user_message(thread_id: str, text: str) -> None:
    """Append a user-role message containing *text* to the given thread."""
    print(f"[*] Adding user message: {text}")
    payload = [{"type": "text", "text": text}]
    client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=payload,
    )
# ---------- 4) Create & poll a run ----------
# Run states after which polling can stop.
TERMINAL_STATES = {"completed", "failed", "cancelled", "expired"}


def create_run(thread_id: str, assistant_id: str) -> str:
    """Kick off a run of *assistant_id* against *thread_id*; return the run id."""
    print("[*] Creating run…")
    new_run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
        # Optional: set limits if needed (ensure non-zero to avoid empty output)
        # max_prompt_tokens=4000,
        # max_completion_tokens=1024,
    )
    print(f"[+] Run created: {new_run.id}")
    return new_run.id
def poll_run(thread_id: str, run_id: str, poll_interval: float = 2.0) -> str:
    """Poll a run until it reaches a terminal state; return that status.

    A `requires_action` status is reported but not handled here — this
    sample never submits tool outputs, it simply keeps polling.
    """
    print("[*] Polling run status… (Ctrl+C to stop)")
    started_at = time.time()
    while True:
        current = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        state = current.status
        waited = time.time() - started_at
        print(f" - Status: {state} | Elapsed: {int(waited // 60)}m {int(waited % 60)}s")
        # `requires_action` is never terminal, so checking terminal states
        # first is equivalent to the original ordering.
        if state in TERMINAL_STATES:
            print(f"[+] Run reached terminal state: {state}")
            return state
        if state == "requires_action":
            print("[!] Run requires_action. This sample does not implement tool output submission.")
            print(" Provide tool outputs via `submit_tool_outputs` and continue polling.")
        time.sleep(poll_interval)
# ---------- 5) Retrieve messages & save any image from Code Interpreter ----------
def list_messages(thread_id: str):
    """Fetch every message on the thread (the API returns newest-first)."""
    print("[*] Listing messages (newest-first)…")
    return client.beta.threads.messages.list(thread_id=thread_id)
def find_first_assistant_text(messages) -> Optional[str]:
    """
    Return the first assistant text value from newest-first messages, or None.

    Fix: the openai SDK returns typed content-block objects (with `.type`
    and `.text.value` attributes), not dicts; the original `item.get(...)`
    raised AttributeError, which the broad except swallowed, so the function
    always returned None against real SDK responses. Both the object shape
    and the raw-JSON dict shape are now handled.
    """
    try:
        for msg in messages.data:
            role = msg["role"] if isinstance(msg, dict) else msg.role
            if role != "assistant":
                continue
            content = msg["content"] if isinstance(msg, dict) else msg.content
            # A message may carry several content items; return the first text one.
            for item in content:
                if isinstance(item, dict):
                    if item.get("type") == "text" and "text" in item:
                        return item["text"].get("value")
                elif getattr(item, "type", None) == "text":
                    return item.text.value
    except Exception as e:
        # Best-effort parsing: report and fall through to None.
        print(f"[!] Failed to parse assistant text: {e}")
    return None
def save_first_code_interpreter_image(messages, out_path="sinewave.png") -> Optional[str]:
    """
    Scan newest-first messages for the first Code Interpreter image and save it.

    Fix: the openai SDK returns typed content-block objects (`.type`,
    `.image_file.file_id`), not dicts; the original `item.get(...)` raised
    AttributeError, which the broad except swallowed, so no image was ever
    saved from real SDK responses. Both object and raw-JSON dict shapes are
    now handled.

    Returns:
        The local file path if an image was saved; otherwise None.
    """
    try:
        for msg in messages.data:
            role = msg["role"] if isinstance(msg, dict) else msg.role
            if role != "assistant":
                continue
            content = msg["content"] if isinstance(msg, dict) else msg.content
            for item in content:
                # Code Interpreter image payload shape:
                # { "type": "image_file", "image_file": { "file_id": "<id>" } }
                if isinstance(item, dict):
                    if item.get("type") != "image_file":
                        continue
                    image_file_id = item["image_file"]["file_id"]
                else:
                    if getattr(item, "type", None) != "image_file":
                        continue
                    image_file_id = item.image_file.file_id
                print(f"[*] Found image file id: {image_file_id}")
                file_content = client.files.content(image_file_id)
                file_content.write_to_file(out_path)
                print(f"[+] Image saved to: {out_path}")
                return out_path
    except Exception as e:
        # Best-effort: report and return None rather than crashing the demo.
        print(f"[!] No image saved (parsing or download error): {e}")
    return None
def show_image(path: str) -> None:
    """Best-effort: open *path* in the platform's default image viewer."""
    try:
        viewer_img = Image.open(path)
        viewer_img.show()
    except Exception as e:
        print(f"[!] Could not open image viewer: {e}")
    else:
        print(f"[+] Opened image viewer for: {path}")
# ---------- Main ----------
def main():
    """End-to-end demo: assistant → thread → message → run → outputs."""
    assistant_id = create_assistant()
    thread_id = create_thread()

    # Ask the assistant to produce a visualization via Code Interpreter.
    add_user_message(thread_id, "Create a visualization of a sinewave")
    run_id = create_run(thread_id, assistant_id)
    status = poll_run(thread_id, run_id, poll_interval=2.0)

    # Pull the whole conversation back down and report the reply.
    messages = list_messages(thread_id)
    reply = find_first_assistant_text(messages)
    if reply:
        print("\n===== Assistant Reply (text) =====\n")
        print(reply)
        print("\n==================================\n")
    else:
        print("[!] No assistant text found in thread messages.")

    # Try saving the first image produced by Code Interpreter.
    saved_path = save_first_code_interpreter_image(messages, out_path="sinewave.png")
    if saved_path:
        # Optional: open the image (platform-dependent viewer)
        show_image(saved_path)

    # For debugging: print raw messages JSON (indent for readability).
    try:
        print("\n===== Raw Messages (JSON) =====\n")
        print(messages.model_dump_json(indent=2))
    except Exception as e:
        print(f"[!] Could not dump messages JSON: {e}")
if __name__ == "__main__":
Looking forward to hearing from you.
Thank you.