from openai import OpenAI
from typing import List, Optional

MODEL_NAME = "gpt-5"

# Tools that will be passed to every model invocation. They are defined once so
# that the configuration lives in a single place.
TOOLS = [
    {
        "type": "custom",
        "name": "code_exec_python",
        "description": "Executes python code",
    },
    {
        "type": "custom",
        "name": "code_exec_cpp",
        "description": "Executes c++ code",
    },
    {
        "type": "custom",
        "name": "code_exec_java",
        "description": "Executes java code",
    },
]

client = OpenAI()


def create_response(
    input_messages: List[dict],
    previous_response_id: Optional[str] = None,
):
    """Wrapper around ``client.responses.create``.

    Parameters
    ----------
    input_messages: List[dict]
        The running conversation history to feed to the model.
    previous_response_id: str | None
        Pass the ``response.id`` from the *previous* call so the model can keep
        the thread of the conversation. Omit on the very first request.
    """
    kwargs = {
        "model": MODEL_NAME,
        "input": input_messages,
        "text": {"format": {"type": "text"}},
        "tools": TOOLS,
    }
    if previous_response_id:
        kwargs["previous_response_id"] = previous_response_id
    return client.responses.create(**kwargs)


# Recursive helper: keep calling the model until it stops requesting tool calls.
def run_conversation(
    input_messages: List[dict],
    previous_response_id: Optional[str] = None,
):
    response = create_response(input_messages, previous_response_id)

    # ``response.output`` is expected to be a list where element 0 is the model
    # message. Element 1 (if present) denotes a tool call. When the model is
    # done with tool calls, that element is omitted.
    tool_call = response.output[1] if len(response.output) > 1 else None

    if tool_call and tool_call.type == "custom_tool_call":
        print("--- tool name ---")
        print(tool_call.name)
        print("--- tool call argument (generated code) ---")
        print(tool_call.input)

        # Add a synthetic *tool result* so the model can continue the thread.
        input_messages.append(
            {
                "type": "function_call_output",
                "call_id": tool_call.call_id,
                "output": "done",  # <-- replace with the real result of the tool call (see sketch below)
            }
        )

        # Recurse with the updated conversation and track the response id so the
        # model is aware of the prior turn.
        return run_conversation(input_messages, previous_response_id=response.id)
    else:
        # Base case: no further tool call - return.
        return


prompt = """Write code to sort the array of numbers in three languages: C++, Python and Java (10 times each)
using code_exec functions.
ALWAYS CALL THESE THREE FUNCTIONS EXACTLY ONCE: code_exec_python, code_exec_cpp and code_exec_java tools to sort the array in each language. Stop once you've called these three functions in each language once.
Print only the time it takes to sort the array in milliseconds.
[448, 986, 255, 884, 632, 623, 246, 439, 936, 925, 644, 159, 777, 986, 706, 723, 534, 862, 195, 686, 846, 880, 970, 276, 613, 736, 329, 622, 870, 284, 945, 708, 267, 327, 678, 807, 687, 890, 907, 645, 364, 333, 385, 262, 730, 603, 945, 358, 923, 930, 761, 504, 870, 561, 517, 928, 994, 949, 233, 137, 670, 555, 149, 870, 997, 809, 180, 498, 914, 508, 411, 378, 394, 368, 766, 486, 757, 319, 338, 159, 585, 934, 654, 194, 542, 188, 934, 163, 889, 736, 792, 737, 667, 772, 198, 971, 459, 402, 989, 949]
"""

# Initial developer message.
messages = [
    {
        "role": "developer",
        "content": prompt,
    }
]

run_conversation(messages)
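

# --- Optional: a minimal sketch of a real execution backend (not part of the
# original example). The loop above feeds the placeholder string "done" back to
# the model; in practice you would run the generated code and return its output
# instead. ``exec_python_code`` below is a hypothetical helper for the
# code_exec_python tool only, assuming the model emits a self-contained script;
# the C++ and Java tools would each need their own compile-and-run step.
import subprocess
import sys


def exec_python_code(code: str, timeout_s: int = 30) -> str:
    """Run ``code`` with the local interpreter and return stdout + stderr."""
    result = subprocess.run(
        [sys.executable, "-c", code],
        capture_output=True,
        text=True,
        timeout=timeout_s,  # guard against runaway scripts
    )
    return result.stdout + result.stderr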