feat: Update CORS origins to allow Chrome extensions and improve logging in migration tool
Some checks failed
Backend CI / lint (push) Failing after 10s
Backend CI / test (push) Failing after 1m37s

JSC
2025-09-19 16:41:11 +02:00
parent 1bef694f38
commit bccfcafe0e
9 changed files with 72 additions and 32 deletions

View File

@@ -23,7 +23,10 @@ class Settings(BaseSettings):
 BACKEND_URL: str = "http://localhost:8000" # Backend base URL
 # CORS Configuration
-CORS_ORIGINS: list[str] = ["http://localhost:8001"] # Allowed origins for CORS
+CORS_ORIGINS: list[str] = [
+"http://localhost:8001", # Frontend development
+"chrome-extension://*", # Chrome extensions
+]
 # Database Configuration
 DATABASE_URL: str = "sqlite+aiosqlite:///data/soundboard.db"
@@ -37,7 +40,9 @@ class Settings(BaseSettings):
 LOG_FORMAT: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 # JWT Configuration
-JWT_SECRET_KEY: str = "your-secret-key-change-in-production" # noqa: S105 default value if none set in .env
+JWT_SECRET_KEY: str = (
+"your-secret-key-change-in-production" # noqa: S105 default value if none set in .env
+)
 JWT_ALGORITHM: str = "HS256"
 JWT_ACCESS_TOKEN_EXPIRE_MINUTES: int = 15
 JWT_REFRESH_TOKEN_EXPIRE_DAYS: int = 7
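For context, a minimal sketch of how CORS_ORIGINS is typically handed to FastAPI's CORSMiddleware; the wiring is not part of this diff, so the app setup below is an assumption. One caveat: Starlette matches allow_origins entries literally, so a wildcard entry like "chrome-extension://*" will not match real extension origins by itself; a pattern passed via allow_origin_regex covers them.

# Hypothetical wiring, not from this commit; only settings.CORS_ORIGINS is real.
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from app.core.config import settings

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    # Literal origins only; wildcard entries such as "chrome-extension://*"
    # are not pattern-matched by Starlette.
    allow_origins=[o for o in settings.CORS_ORIGINS if "*" not in o],
    # Illustrative regex that actually admits Chrome extension origins.
    allow_origin_regex=r"chrome-extension://.*",
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)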

View File

@@ -1,11 +1,12 @@
 from collections.abc import AsyncGenerator, Callable
+from alembic.config import Config
 from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
 from sqlmodel import SQLModel
 from sqlmodel.ext.asyncio.session import AsyncSession
 # Import all models to ensure SQLModel metadata discovery
 import app.models # noqa: F401
+from alembic import command
 from app.core.config import settings
 from app.core.logging import get_logger
@@ -44,8 +45,6 @@ async def init_db() -> None:
 try:
 logger.info("Running database migrations")
 # Run Alembic migrations programmatically
-from alembic import command
-from alembic.config import Config
 # Get the alembic config
 alembic_cfg = Config("alembic.ini")
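The hunk above moves the Alembic imports from inside init_db to module level; the migration call itself sits outside the visible context. For reference, the standard programmatic pattern looks roughly like this: "alembic.ini" comes from the context line above, while the wrapper function name and the upgrade to "head" are assumptions rather than anything this diff confirms.

from alembic import command
from alembic.config import Config


def run_migrations() -> None:
    # Load the project's Alembic configuration and apply all pending revisions.
    alembic_cfg = Config("alembic.ini")
    command.upgrade(alembic_cfg, "head")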

View File

@@ -598,7 +598,9 @@ class CreditService:
 current_credits = user.credits
 plan_credits = user.plan.credits
 max_credits = user.plan.max_credits
-target_credits = min(current_credits + plan_credits, max_credits)
+target_credits = min(
+current_credits + plan_credits, max_credits,
+)
 credits_added = target_credits - current_credits
 stats["total_credits_added"] += credits_added
 else:
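A quick worked example of the capping logic above, with made-up numbers: a recharge never pushes a user past the plan's maximum, and the recorded amount is the capped difference.

# Illustrative values only.
current_credits = 40
plan_credits = 25
max_credits = 50

target_credits = min(current_credits + plan_credits, max_credits)  # 50 (capped)
credits_added = target_credits - current_credits                   # 10, not 25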

View File

@@ -348,8 +348,12 @@ class SchedulerService:
 # Check if task is still active and pending
 if not task.is_active or task.status != TaskStatus.PENDING:
 logger.warning(
-"Task %s execution skipped - is_active: %s, status: %s (should be %s)",
-task_id, task.is_active, task.status, TaskStatus.PENDING,
+"Task %s execution skipped - is_active: %s, status: %s "
+"(should be %s)",
+task_id,
+task.is_active,
+task.status,
+TaskStatus.PENDING,
 )
 return
@@ -364,7 +368,9 @@ class SchedulerService:
 # Mark task as running
 logger.info(
-"Task %s starting execution (type: %s)", task_id, task.recurrence_type,
+"Task %s starting execution (type: %s)",
+task_id,
+task.recurrence_type,
 )
 await repo.mark_as_running(task)
@@ -383,7 +389,8 @@ class SchedulerService:
 # For CRON tasks, update execution metadata but keep PENDING
 # APScheduler handles the recurring schedule automatically
 logger.info(
-"Task %s (CRON) executed successfully, updating metadata", task_id,
+"Task %s (CRON) executed successfully, updating metadata",
+task_id,
 )
 task.last_executed_at = datetime.now(tz=UTC)
 task.executions_count += 1
@@ -392,8 +399,11 @@ class SchedulerService:
 session.add(task)
 await session.commit()
 logger.info(
-"Task %s (CRON) metadata updated, status: %s, executions: %s",
-task_id, task.status, task.executions_count,
+"Task %s (CRON) metadata updated, status: %s, "
+"executions: %s",
+task_id,
+task.status,
+task.executions_count,
 )
 else:
 # For non-CRON recurring tasks, calculate next execution
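The reflowed logger calls above keep the existing style: %-style placeholders with the values passed as separate arguments (so formatting stays lazy), and adjacent string literals to split long messages across lines. A standalone sketch with illustrative values:

import logging

logger = logging.getLogger(__name__)

task_id, status, executions_count = 7, "PENDING", 3  # illustrative values
logger.info(
    "Task %s (CRON) metadata updated, status: %s, "
    "executions: %s",  # adjacent literals are concatenated at compile time
    task_id,
    status,
    executions_count,
)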

View File

@@ -80,11 +80,19 @@ class TaskHandlerRegistry:
 msg = f"Invalid user_id format: {user_id}"
 raise TaskExecutionError(msg) from e
-transaction = await self.credit_service.recharge_user_credits_auto(user_id_int)
+transaction = await self.credit_service.recharge_user_credits_auto(
+user_id_int,
+)
 if transaction:
-logger.info("Recharged credits for user %s: %s credits added", user_id, transaction.amount)
+logger.info(
+"Recharged credits for user %s: %s credits added",
+user_id,
+transaction.amount,
+)
 else:
-logger.info("No credits added for user %s (already at maximum)", user_id)
+logger.info(
+"No credits added for user %s (already at maximum)", user_id,
+)
 else:
 # Recharge all users (system task)
 stats = await self.credit_service.recharge_all_users_credits()
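The context lines above imply that the handler first parses the payload's user_id into an int and wraps failures in TaskExecutionError before calling the recharge service. A self-contained sketch of that pattern; the exception body and the helper name are illustrative, not taken from the repository.

class TaskExecutionError(Exception):
    """Raised when a scheduled task cannot run; stands in for the app's exception."""


def parse_user_id(user_id: str) -> int:
    # Convert the task payload's user_id, surfacing bad input as a task error.
    try:
        return int(user_id)
    except ValueError as e:
        msg = f"Invalid user_id format: {user_id}"
        raise TaskExecutionError(msg) from e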