【GPT4o対応】OpenAI API のPythonサンプルコードと出力例
今回は、OpenAI の API 利用サンプルコードをご紹介します。
OpenAI API は OpenAI純正のAPI のみならず、vLLMなど他の推論エンジンでも OpenAI 準拠のAPIサーバーが公開されており、LLMサービングAPIのデファクトとなりつつありますので、コーディングのお作法をおさえておきましょう。
OpenAI の GPT シリーズのAPIにアクセスするための、シンプルなサンプルコードは以下のようになります。生成結果をストリーミングで逐次受信してみましょう。
サンプルコード:クイックスタート
import asyncio
import os
import traceback

from openai import AsyncOpenAI


async def main() -> None:
    """Stream a chat completion from the OpenAI API and print each raw chunk."""
    try:
        # Model selection (prices per million tokens at time of writing):
        # model = "gpt-4-turbo"  # $10.00/MTok input, $30.00/MTok output
        # model = "gpt-4o"       # $5.00/MTok input, $15.00/MTok output
        model = "gpt-3.5-turbo-0125"

        # Read the API key from the environment; the placeholder fallback
        # marks where a literal key could be substituted while experimenting.
        api_key = os.environ.get("OPENAI_API_KEY", "your api key")
        client = AsyncOpenAI(api_key=api_key)

        stream = await client.chat.completions.create(
            model=model,
            stream=True,
            messages=[
                {"role": "system", "content": "あなたは誠実な日本語アシスタントです"},
                {"role": "user", "content": "こんにちは"},
            ],
            # Ask the server to append a final chunk carrying token usage
            # (input/output token counts).
            stream_options={"include_usage": True},
        )
        # Print every chunk as it arrives, without parsing.
        async for chunk in stream:
            print(f"chunk__{chunk}")
    except Exception as e:
        print(f"予期せぬエラーが発生しました: {e}\n{traceback.format_exc()}")


if __name__ == "__main__":
    asyncio.run(main())
出力例
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='', function_call=None, role='assistant', tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='こんにちは', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='!', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='何', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='か', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='お', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='手', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='伝', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='い', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='で', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='き', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='ます', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='か', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content='?', function_call=None, role=None, tool_calls=None), finish_reason=None, index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[Choice(delta=ChoiceDelta(content=None, function_call=None, role=None, tool_calls=None), finish_reason='stop', index=0, logprobs=None)], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=None)
ChatCompletionChunk(id='chatcmpl-9iGtdyZ43HFebZV22QOaZPIzgeStA', choices=[], created=1234567890, model='gpt-3.5-turbo-0125', object='chat.completion.chunk', system_fingerprint=None, usage=CompletionUsage(completion_tokens=14, prompt_tokens=31, total_tokens=45))
サンプルコード:ストリーミングされたチャンク内容をパースする
chunkをパースして、中身の各種データを取得してみましょう
import asyncio
import os
import traceback

from openai import AsyncOpenAI


async def main() -> None:
    """Stream a chat completion and parse each chunk into its fields.

    Prints the metadata carried by the first chunk (id, created, model,
    role), streams the generated text as it arrives, and reports the
    finish reason plus token usage taken from the final chunk.
    """
    try:
        model = "gpt-3.5-turbo-0125"
        # Read the API key from the environment rather than hard-coding it.
        api_key = os.environ.get("OPENAI_API_KEY", "your api key")
        client = AsyncOpenAI(api_key=api_key)

        stream = await client.chat.completions.create(
            model=model,
            stream=True,
            messages=[
                {"role": "system", "content": "あなたは誠実な日本語アシスタントです"},
                {"role": "user", "content": "こんにちは"},
            ],
            stream_options={"include_usage": True},  # emit a final usage chunk
        )

        first_chunk = None    # first chunk seen (carries role and metadata)
        last_chunk = None     # last chunk seen (carries usage when requested)
        finish_reason = None  # why generation stopped ("stop", "length", ...)
        full_content = ""     # accumulated generated text
        role = None
        created = None
        model = None
        completion_id = None

        async for chunk in stream:
            # Guard clause: skip anything that is not a streaming chunk.
            if chunk.object != "chat.completion.chunk":
                continue
            if first_chunk is None:
                # First chunk: capture the fields that only appear here.
                first_chunk = chunk
                model = first_chunk.model
                created = first_chunk.created
                completion_id = first_chunk.id
                if chunk.choices:
                    role = chunk.choices[0].delta.role
                print(f"completion_id: {completion_id}")
                print(f"created: {created}")
                print(f"model: {model}")
                print(f"role: {role}")
                print("streaming text:", end="", flush=True)
            last_chunk = chunk
            if chunk.choices:
                choice = chunk.choices[0]
                if choice.delta.content:
                    # Text generated in this iteration: echo it immediately
                    # and append it to the running transcript.
                    print(choice.delta.content, end="", flush=True)
                    full_content += choice.delta.content
                if finish_reason is None:
                    finish_reason = choice.finish_reason
        print()

        if last_chunk:
            # With include_usage, the usage-bearing chunk arrives last
            # (and has an empty choices list).
            usage = last_chunk.usage
            print(f"Full Content: {full_content}")
            print(f"Finish Reason: {finish_reason}")
            if usage:
                print(f"ttl tokens: {usage.total_tokens}")
                # Fixed stray double colon in the original label.
                print(f"num input tokens: {usage.prompt_tokens}")
                print(f"num output tokens: {usage.completion_tokens}")
            else:
                print("Usage information not available")
    except Exception as e:
        print(f"予期せぬエラーが発生しました: {e}\n{traceback.format_exc()}")


if __name__ == "__main__":
    asyncio.run(main())
実行結果
completion_id: chatcmpl-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
created: 123456789
model: gpt-3.5-turbo-0125
role: assistant
streaming text:こんにちは!どのようにお手伝いしましょうか?
Full Content: こんにちは!どのようにお手伝いしましょうか?
Finish Reason: stop
ttl tokens: 51
num input tokens: 31
num output tokens: 20