@@ -1,3 +1,122 @@
 # openai-python
-The OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language
+The library uses a 10-minute default timeout, retries failed requests 2 times, supports file uploads, and provides both synchronous and asynchronous clients powered by [HTTPX](https://github.com/encode/httpx).
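+
+Both defaults can be overridden when constructing the client. A minimal sketch (assuming the v1 client options `timeout` and `max_retries`; verify against the installed version):
+
+```
+import httpx
+from openai import OpenAI
+
+client = OpenAI(
+    # assumed option: 20s read/write timeout, 5s connect timeout, instead of the 10-minute default
+    timeout=httpx.Timeout(20.0, connect=5.0),
+    # assumed option: disable automatic retries instead of the default 2
+    max_retries=0,
+)
+```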
+
+Docs: https://platform.openai.com/docs
+
+## Usage
+
+Hello world (install the packages first, then run the snippet):
+```
+pip install openai
+pip install python-dotenv
+```
+
+```
+from dotenv import load_dotenv
+from openai import OpenAI
+
+load_dotenv()  # loads OPENAI_API_KEY from a local .env file into the environment
+
+client = OpenAI()  # picks up OPENAI_API_KEY from the environment
+
+chat_completion = client.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "Say this is a test",
+        }
+    ],
+    model="gpt-3.5-turbo",
+)
+print(chat_completion.choices[0].message.content)
+```
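+
+When a request still fails after the automatic retries, the client raises a typed exception. A minimal sketch of catching them (assuming the v1 exception classes `openai.APIConnectionError`, `openai.RateLimitError`, and `openai.APIStatusError`; check the names against your installed version):
+
+```
+import openai
+from openai import OpenAI
+
+client = OpenAI()
+
+try:
+    client.chat.completions.create(
+        messages=[{"role": "user", "content": "Say this is a test"}],
+        model="gpt-3.5-turbo",
+    )
+except openai.APIConnectionError as e:
+    print("The server could not be reached:", e.__cause__)
+except openai.RateLimitError:
+    print("Rate limited; back off and retry later.")
+except openai.APIStatusError as e:
+    print("Non-2xx status code:", e.status_code)
+```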
+
+Async usage:
+```
+import asyncio
+
+from openai import AsyncOpenAI
+
+client = AsyncOpenAI()  # also reads OPENAI_API_KEY from the environment
+
+
+async def main() -> None:
+    chat_completion = await client.chat.completions.create(
+        messages=[
+            {
+                "role": "user",
+                "content": "Say this is a test",
+            }
+        ],
+        model="gpt-3.5-turbo",
+    )
+    print(chat_completion.choices[0].message.content)
+
+
+asyncio.run(main())
+```
+
+Streaming responses:
+```
+from openai import OpenAI
+
+client = OpenAI()
+
+stream = client.chat.completions.create(
+    model="gpt-4",
+    messages=[{"role": "user", "content": "Say this is a test"}],
+    stream=True,
+)
+for part in stream:
+    # each chunk carries a token delta; print without a newline so the text streams inline
+    print(part.choices[0].delta.content or "", end="")
+```
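+
+The same `stream=True` flag works with the async client. A minimal sketch (assuming the awaited call returns an async iterator of chunks, as in the v1 client):
+
+```
+import asyncio
+
+from openai import AsyncOpenAI
+
+client = AsyncOpenAI()
+
+
+async def main() -> None:
+    stream = await client.chat.completions.create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Say this is a test"}],
+        stream=True,
+    )
+    async for part in stream:
+        print(part.choices[0].delta.content or "", end="")
+
+
+asyncio.run(main())
+```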
+
+In a notebook it is easier to configure the module-level client directly (unlike a web app, a notebook is awkward to pair with a .env file):
+```
+import openai
+
+openai.api_key = '...'
+
+# all client options can be configured just like the `OpenAI` instantiation counterpart
+openai.base_url = "https://..."
+openai.default_headers = {"x-foo": "true"}
+
+completion = openai.chat.completions.create(
+    model="gpt-4",
+    messages=[
+        {
+            "role": "user",
+            "content": "How do I output all files in a directory using Python?",
+        },
+    ],
+)
+print(completion.choices[0].message.content)
+```
+
+File upload:
+```
+from pathlib import Path
+from openai import OpenAI
+
+client = OpenAI()
+
+# uploads the local file and tags it for fine-tuning
+client.files.create(
+    file=Path("input.jsonl"),
+    purpose="fine-tune",
+)
+```
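+
+Passing a `Path` is not the only option. A minimal sketch of uploading an already-open file object instead (assuming the `file` parameter also accepts binary file handles, as in the v1 client):
+
+```
+from openai import OpenAI
+
+client = OpenAI()
+
+# assumed alternative: hand the client an open binary file instead of a Path
+with open("input.jsonl", "rb") as f:
+    client.files.create(file=f, purpose="fine-tune")
+```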
+
+AzureOpenAI is also supported and shares the same interface:
+
+```
+from openai import AzureOpenAI
+
+# gets the API key from the environment variable AZURE_OPENAI_API_KEY
+client = AzureOpenAI(
+    api_version="2023-07-01-preview",
+    azure_endpoint="https://example-endpoint.openai.azure.com",
+)
+
+completion = client.chat.completions.create(
+    model="deployment-name",  # e.g. gpt-35-instant
+    messages=[
+        {
+            "role": "user",
+            "content": "How do I output all files in a directory using Python?",
+        },
+    ],
+)
+print(completion.model_dump_json(indent=2))
+```
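+
+An async variant mirrors `AsyncOpenAI`. A minimal sketch (assuming the v1 `AsyncAzureOpenAI` class takes the same constructor options; verify against the installed version):
+
+```
+import asyncio
+
+from openai import AsyncAzureOpenAI
+
+client = AsyncAzureOpenAI(
+    api_version="2023-07-01-preview",
+    azure_endpoint="https://example-endpoint.openai.azure.com",
+)
+
+
+async def main() -> None:
+    completion = await client.chat.completions.create(
+        model="deployment-name",  # the Azure deployment name, not the model family
+        messages=[{"role": "user", "content": "Say this is a test"}],
+    )
+    print(completion.choices[0].message.content)
+
+
+asyncio.run(main())
+```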
+