fix: disable autoconnect and set pool timeout on PooledPostgresqlDatabase (#80)

- Set timeout=5 so pool exhaustion surfaces as an error instead of hanging forever
- Set autoconnect=False to require explicit connection acquisition
- Add HTTP middleware in main.py to open/close connections per request

Closes #80

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Cal Corum 2026-03-27 00:36:35 -05:00
parent fa176c9b05
commit 6637f6e9eb
2 changed files with 15 additions and 2 deletions

View File

@@ -30,8 +30,8 @@ if DATABASE_TYPE.lower() == "postgresql":
port=int(os.environ.get("POSTGRES_PORT", "5432")), port=int(os.environ.get("POSTGRES_PORT", "5432")),
max_connections=20, max_connections=20,
stale_timeout=300, # 5 minutes stale_timeout=300, # 5 minutes
timeout=0, timeout=5,
autoconnect=True, autoconnect=False,
autorollback=True, # Automatically rollback failed transactions autorollback=True, # Automatically rollback failed transactions
) )
else: else:

View File

@@ -8,6 +8,8 @@ from fastapi import Depends, FastAPI, Request
from fastapi.openapi.docs import get_swagger_ui_html from fastapi.openapi.docs import get_swagger_ui_html
from fastapi.openapi.utils import get_openapi from fastapi.openapi.utils import get_openapi
from .db_engine import db
# from fastapi.openapi.docs import get_swagger_ui_html # from fastapi.openapi.docs import get_swagger_ui_html
# from fastapi.openapi.utils import get_openapi # from fastapi.openapi.utils import get_openapi
@@ -68,6 +70,17 @@ app = FastAPI(
) )
@app.middleware("http")
async def db_connection_middleware(request: Request, call_next):
    """Acquire a pooled database connection for the duration of one request.

    Opens (or reuses) the shared peewee connection before the request is
    handled, and releases it back to the pool afterwards — even when the
    handler raises — so no request can leak a pool slot.
    """
    db.connect(reuse_if_open=True)
    try:
        # The finally block runs before the return value is delivered,
        # so the connection is always released exactly once per request.
        return await call_next(request)
    finally:
        if not db.is_closed():
            db.close()
logger.info(f"Starting up now...") logger.info(f"Starting up now...")