Note: snippets for handling concurrent API requests
from fastapi import FastAPI
import asyncio

app = FastAPI()

async def fetch_data(url: str) -> str:
    # Simulate fetching data from an API
    await asyncio.sleep(1)
    return f"Data from {url}"

@app.get("/concurrent")
async def concurrent_requests():
    urls = ["api1", "api2", "api3"]  # List of API endpoints to fetch data from
    # Schedule all fetches concurrently and await them together
    tasks = [fetch_data(url) for url in urls]
    results = await asyncio.gather(*tasks)
    return results

# Run the FastAPI app (guarded so importing this module doesn't start the server)
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)
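For reference, if the fetches should hit real endpoints instead of sleeping, fetch_data can be swapped for an async HTTP call. A minimal sketch, assuming httpx as the client (an assumption, not part of the snippet above; any async HTTP client works):

import httpx

async def fetch_data(url: str) -> str:
    # Issue the GET without blocking the event loop
    async with httpx.AsyncClient(timeout=10.0) as http:
        resp = await http.get(url)
        resp.raise_for_status()
        return resp.text

One detail worth remembering: asyncio.gather preserves input order, so results[i] always corresponds to urls[i] even though the requests complete out of order.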
Note: snippets for handling common OpenAI API request scenarios
# two intermediate snippets which we'll stow for later reference
# Streaming an OpenAI completion as server-sent events; reuses `app` from above
from fastapi.responses import StreamingResponse
from openai import OpenAI

client = OpenAI()

def get_openai_generator(prompt: str):
    openai_stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.0,
        stream=True,
    )
    for event in openai_stream:
        # Each chunk carries an incremental delta; content is None on role/stop chunks
        current_response = event.choices[0].delta.content
        if current_response is not None:
            yield "data: " + current_response + "\n\n"

@app.get("/stream")
async def stream(prompt: str):
    return StreamingResponse(get_openai_generator(prompt), media_type="text/event-stream")
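To sanity-check the SSE output, a small client-side sketch (assuming the app above is running locally on port 8000; the prompt query parameter matches the endpoint signature):

import requests

with requests.get(
    "http://127.0.0.1:8000/stream",
    params={"prompt": "Say hello"},
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        # SSE frames arrive as "data: <chunk>" lines separated by blank lines
        if line and line.startswith("data: "):
            print(line[len("data: "):], end="", flush=True)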
# Flask-style endpoint; `qa`, `authorized`, `prompter`, `oaiservice`,
# `DocumentMetaDTO`, `RFCRetriever`, and `is_url` are project-level helpers
import requests
from flask import request, jsonify

@qa.post("/single")
@authorized
def single():
    req_data = request.get_json()
    query = req_data["query"]
    ctx = req_data["context"]
    url = ctx if is_url(ctx) else ""
    try:
        # Fetch document metadata from the .json sibling of the .txt URL
        ref_text_meta = DocumentMetaDTO(
            **requests.get(url.replace("txt", "json")).json()
        )
        p = prompter.construct_prompt(query, ref_text_meta)
        ctx = RFCRetriever(url=url.replace("txt", "html")).load()
        response = oaiservice.client.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=prompter.construct_message(p, ctx),
        )
        # model_dump() turns the completion object into a JSON-safe dict
        return response.model_dump()
    except requests.exceptions.RequestException as e:
        return jsonify({"error": str(e)}), 500
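A quick way to exercise the endpoint, assuming the qa blueprint is mounted at /qa and the service listens on port 5000 (both assumptions; adjust to the actual deployment). The hypothetical .txt URL follows the pattern the .json metadata lookup and RFCRetriever above expect:

import requests

payload = {
    "query": "What does this RFC specify?",
    "context": "https://www.rfc-editor.org/rfc/rfc2616.txt",  # hypothetical example document
}
resp = requests.post("http://127.0.0.1:5000/qa/single", json=payload)
print(resp.status_code, resp.json())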