Refactor and enhance logging in webhook handlers and Redis client

- Translated comments and docstrings from German to English for clarity.
- Made logging consistent across the create, delete, and update webhook handlers.
- Centralized logging in a dedicated logger utility (get_service_logger).
- Added new enums for file and XAI sync statuses in models (sketched below).
- Updated the Redis client factory to use the centralized logger, enable decode_responses, and improve error handling.
- Renamed the ApiResponse status parameter to status_code and made response messages more descriptive.
bsiggel
2026-03-08 21:50:34 +00:00
parent f392ec0f06
commit a0cf845877
17 changed files with 300 additions and 276 deletions
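The models file that adds the sync-status enums is presumably among the 17 changed files, but its hunk is not shown below. Purely as a hypothetical sketch of what the commit message describes — the enum names and members here are assumptions, not taken from the diff:

# Hypothetical sketch only; the actual names and members are not visible in this diff.
from enum import Enum

class FileSyncStatus(str, Enum):
    PENDING = "pending"   # assumed member
    SYNCED = "synced"     # assumed member
    ERROR = "error"       # assumed member

class XaiSyncStatus(str, Enum):
    PENDING = "pending"   # assumed member
    SYNCED = "synced"     # assumed member
    ERROR = "error"       # assumed member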

View File

@@ -7,7 +7,7 @@ Supports syncing a single employee or all employees.
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
-from calendar_sync_utils import get_redis_client, set_employee_lock, log_operation
+from calendar_sync_utils import get_redis_client, set_employee_lock, get_logger
from motia import http, ApiRequest, ApiResponse, FlowContext
@@ -38,10 +38,10 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
kuerzel = body.get('kuerzel')
if not kuerzel:
return ApiResponse(
-status=400,
+status_code=400,
body={
'error': 'kuerzel required',
-'message': 'Bitte kuerzel im Body angeben'
+'message': 'Please provide kuerzel in body'
}
)
@@ -49,7 +49,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
if kuerzel_upper == 'ALL':
# Emit sync-all event
-log_operation('info', "Calendar Sync API: Emitting sync-all event", context=ctx)
+ctx.logger.info("Calendar Sync API: Emitting sync-all event")
await ctx.enqueue({
"topic": "calendar_sync_all",
"data": {
@@ -57,10 +57,10 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
}
})
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'triggered',
-'message': 'Calendar sync wurde für alle Mitarbeiter ausgelöst',
+'message': 'Calendar sync triggered for all employees',
'triggered_by': 'api'
}
)
@@ -69,9 +69,9 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
redis_client = get_redis_client(ctx)
if not set_employee_lock(redis_client, kuerzel_upper, 'api', ctx):
-log_operation('info', f"Calendar Sync API: Sync already active for {kuerzel_upper}, skipping", context=ctx)
+ctx.logger.info(f"Calendar Sync API: Sync already active for {kuerzel_upper}, skipping")
return ApiResponse(
-status=409,
+status_code=409,
body={
'status': 'conflict',
'message': f'Calendar sync already active for {kuerzel_upper}',
@@ -80,7 +80,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
}
)
-log_operation('info', f"Calendar Sync API called for {kuerzel_upper}", context=ctx)
+ctx.logger.info(f"Calendar Sync API called for {kuerzel_upper}")
# Lock successfully set, now emit event
await ctx.enqueue({
@@ -92,19 +92,19 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
})
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'triggered',
-'message': f'Calendar sync was triggered for {kuerzel_upper}',
+'message': f'Calendar sync triggered for {kuerzel_upper}',
'kuerzel': kuerzel_upper,
'triggered_by': 'api'
}
)
except Exception as e:
-log_operation('error', f"Error in API trigger: {e}", context=ctx)
+ctx.logger.error(f"Error in API trigger: {e}")
return ApiResponse(
-status=500,
+status_code=500,
body={
'error': 'Internal server error',
'details': str(e)
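For reference, a minimal client call against this trigger endpoint. This is only a sketch: the host, port, and route are assumptions (the http(...) trigger line lies outside the hunks shown); the request body shape and the 200/409/500 responses come from the handler above.

# Sketch using the requests library; the URL is an assumption.
import requests

resp = requests.post(
    "http://localhost:3000/calendar/sync/trigger",  # hypothetical route
    json={"kuerzel": "ABC"},  # or {"kuerzel": "ALL"} to sync all employees
)
print(resp.status_code)  # 200 = triggered, 409 = sync already locked
print(resp.json())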

View File

@@ -3,50 +3,24 @@ Calendar Sync Utilities
Shared utility functions for calendar synchronization between Google Calendar and Advoware.
"""
-import logging
import asyncpg
import os
import redis
import time
from typing import Optional, Any, List
from googleapiclient.discovery import build
from google.oauth2 import service_account
-# Configure logging
-logger = logging.getLogger(__name__)
+from services.logging_utils import get_service_logger
-def log_operation(level: str, message: str, context=None, **context_vars):
-"""Centralized logging with context, supporting file and console logging."""
-context_str = ' '.join(f"{k}={v}" for k, v in context_vars.items() if v is not None)
-full_message = f"{message} {context_str}".strip()
-# Use ctx.logger if context is available (Motia III FlowContext)
-if context and hasattr(context, 'logger'):
-if level == 'info':
-context.logger.info(full_message)
-elif level == 'warning':
-context.logger.warning(full_message)
-elif level == 'error':
-context.logger.error(full_message)
-elif level == 'debug':
-context.logger.debug(full_message)
-else:
-# Fallback to standard logger
-if level == 'info':
-logger.info(full_message)
-elif level == 'warning':
-logger.warning(full_message)
-elif level == 'error':
-logger.error(full_message)
-elif level == 'debug':
-logger.debug(full_message)
-# Also log to console for journalctl visibility
-print(f"[{level.upper()}] {full_message}")
+def get_logger(context=None):
+"""Get logger for calendar sync operations"""
+return get_service_logger('calendar_sync', context)
async def connect_db(context=None):
"""Connect to Postgres DB from environment variables."""
+logger = get_logger(context)
try:
conn = await asyncpg.connect(
host=os.getenv('POSTGRES_HOST', 'localhost'),
@@ -57,12 +31,13 @@ async def connect_db(context=None):
)
return conn
except Exception as e:
-log_operation('error', f"Failed to connect to DB: {e}", context=context)
+logger.error(f"Failed to connect to DB: {e}")
raise
async def get_google_service(context=None):
"""Initialize Google Calendar service."""
+logger = get_logger(context)
try:
service_account_path = os.getenv('GOOGLE_CALENDAR_SERVICE_ACCOUNT_PATH', 'service-account.json')
if not os.path.exists(service_account_path):
@@ -75,48 +50,53 @@ async def get_google_service(context=None):
service = build('calendar', 'v3', credentials=creds)
return service
except Exception as e:
-log_operation('error', f"Failed to initialize Google service: {e}", context=context)
+logger.error(f"Failed to initialize Google service: {e}")
raise
-def get_redis_client(context=None):
+def get_redis_client(context=None) -> redis.Redis:
"""Initialize Redis client for calendar sync operations."""
+logger = get_logger(context)
try:
redis_client = redis.Redis(
host=os.getenv('REDIS_HOST', 'localhost'),
port=int(os.getenv('REDIS_PORT', '6379')),
db=int(os.getenv('REDIS_DB_CALENDAR_SYNC', '2')),
-socket_timeout=int(os.getenv('REDIS_TIMEOUT_SECONDS', '5'))
+socket_timeout=int(os.getenv('REDIS_TIMEOUT_SECONDS', '5')),
+decode_responses=True
)
return redis_client
except Exception as e:
-log_operation('error', f"Failed to initialize Redis client: {e}", context=context)
+logger.error(f"Failed to initialize Redis client: {e}")
raise
-async def get_advoware_employees(advoware, context=None):
+async def get_advoware_employees(advoware, context=None) -> List[Any]:
"""Fetch list of employees from Advoware."""
+logger = get_logger(context)
try:
result = await advoware.api_call('api/v1/advonet/Mitarbeiter', method='GET', params={'aktiv': 'true'})
employees = result if isinstance(result, list) else []
-log_operation('info', f"Fetched {len(employees)} Advoware employees", context=context)
+logger.info(f"Fetched {len(employees)} Advoware employees")
return employees
except Exception as e:
-log_operation('error', f"Failed to fetch Advoware employees: {e}", context=context)
+logger.error(f"Failed to fetch Advoware employees: {e}")
raise
-def set_employee_lock(redis_client, kuerzel: str, triggered_by: str, context=None) -> bool:
+def set_employee_lock(redis_client: redis.Redis, kuerzel: str, triggered_by: str, context=None) -> bool:
"""Set lock for employee sync operation."""
+logger = get_logger(context)
employee_lock_key = f'calendar_sync_lock_{kuerzel}'
if redis_client.set(employee_lock_key, triggered_by, ex=1800, nx=True) is None:
-log_operation('info', f"Sync already active for {kuerzel}, skipping", context=context)
+logger.info(f"Sync already active for {kuerzel}, skipping")
return False
return True
-def clear_employee_lock(redis_client, kuerzel: str, context=None):
+def clear_employee_lock(redis_client: redis.Redis, kuerzel: str, context=None) -> None:
"""Clear lock for employee sync operation and update last-synced timestamp."""
+logger = get_logger(context)
try:
employee_lock_key = f'calendar_sync_lock_{kuerzel}'
employee_last_synced_key = f'calendar_sync_last_synced_{kuerzel}'
@@ -128,6 +108,6 @@ def clear_employee_lock(redis_client, kuerzel: str, context=None):
# Delete the lock
redis_client.delete(employee_lock_key)
-log_operation('debug', f"Cleared lock and updated last-synced for {kuerzel} to {current_time}", context=context)
+logger.debug(f"Cleared lock and updated last-synced for {kuerzel} to {current_time}")
except Exception as e:
-log_operation('warning', f"Failed to clear lock and update last-synced for {kuerzel}: {e}", context=context)
+logger.warning(f"Failed to clear lock and update last-synced for {kuerzel}: {e}")
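The services.logging_utils.get_service_logger helper that replaces log_operation is not part of the hunks shown here. A plausible sketch, inferred from the call sites above and from the behavior of the removed log_operation (prefer ctx.logger from the Motia FlowContext, fall back to stdlib logging with console output):

# Plausible sketch of services/logging_utils.py — inferred, not taken from this commit.
import logging

def get_service_logger(service_name: str, context=None):
    # Prefer the Motia FlowContext logger when one is available.
    if context is not None and hasattr(context, 'logger'):
        return context.logger
    # Fallback: a stdlib logger that also reaches the console (journalctl visibility).
    logger = logging.getLogger(service_name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('[%(levelname)s] %(name)s: %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger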

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = {
"name": "VMH Webhook Bankverbindungen Create",
"description": "Empfängt Create-Webhooks von EspoCRM für Bankverbindungen",
"description": "Receives create webhooks from EspoCRM for Bankverbindungen",
"flows": ["vmh-bankverbindungen"],
"triggers": [
http("POST", "/vmh/webhook/bankverbindungen/create")
@@ -29,7 +29,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
if isinstance(payload, list):
@@ -39,7 +39,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Create-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} IDs found for create sync")
# Emit events
for entity_id in entity_ids:
@@ -53,11 +53,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ VMH Create Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ VMH Create Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'received',
'action': 'create',
@@ -67,10 +67,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: BANKVERBINDUNGEN CREATE WEBHOOK")
ctx.logger.error("ERROR: BANKVERBINDUNGEN CREATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={'error': 'Internal server error', 'details': str(e)}
)
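All of the webhook handlers in this commit share the same ID-collection logic. A standalone illustration of it (the list-branch body is elided in the hunks above, so its reconstruction here is an assumption; the example IDs are invented):

# Standalone sketch of the shared pattern; example IDs are invented.
def collect_entity_ids(payload):
    """EspoCRM may deliver a batch (list of records) or a single record."""
    entity_ids = set()
    if isinstance(payload, list):
        for item in payload:  # assumed body of the elided list branch
            if isinstance(item, dict) and 'id' in item:
                entity_ids.add(item['id'])
    elif isinstance(payload, dict) and 'id' in payload:
        entity_ids.add(payload['id'])
    return entity_ids

print(collect_entity_ids([{'id': 'a1'}, {'id': 'b2'}, {'id': 'a1'}]))  # {'a1', 'b2'}
print(collect_entity_ids({'id': 'c3'}))                                # {'c3'}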

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = {
"name": "VMH Webhook Bankverbindungen Delete",
"description": "Empfängt Delete-Webhooks von EspoCRM für Bankverbindungen",
"description": "Receives delete webhooks from EspoCRM for Bankverbindungen",
"flows": ["vmh-bankverbindungen"],
"triggers": [
http("POST", "/vmh/webhook/bankverbindungen/delete")
@@ -29,7 +29,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
-# Sammle alle IDs
+# Collect all IDs
entity_ids = set()
if isinstance(payload, list):
@@ -39,7 +39,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Delete-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} IDs found for delete sync")
# Emit events
for entity_id in entity_ids:
@@ -53,11 +53,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ VMH Delete Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ VMH Delete Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'received',
'action': 'delete',
@@ -67,10 +67,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: BANKVERBINDUNGEN DELETE WEBHOOK")
ctx.logger.error("ERROR: BANKVERBINDUNGEN DELETE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={'error': 'Internal server error', 'details': str(e)}
)

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = {
"name": "VMH Webhook Bankverbindungen Update",
"description": "Empfängt Update-Webhooks von EspoCRM für Bankverbindungen",
"description": "Receives update webhooks from EspoCRM for Bankverbindungen",
"flows": ["vmh-bankverbindungen"],
"triggers": [
http("POST", "/vmh/webhook/bankverbindungen/update")
@@ -29,7 +29,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
-# Sammle alle IDs
+# Collect all IDs
entity_ids = set()
if isinstance(payload, list):
@@ -39,7 +39,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Update-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} IDs found for update sync")
# Emit events
for entity_id in entity_ids:
@@ -53,11 +53,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ VMH Update Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ VMH Update Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'received',
'action': 'update',
@@ -67,10 +67,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: BANKVERBINDUNGEN UPDATE WEBHOOK")
ctx.logger.error("ERROR: BANKVERBINDUNGEN UPDATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={'error': 'Internal server error', 'details': str(e)}
)

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = {
"name": "VMH Webhook Beteiligte Create",
"description": "Empfängt Create-Webhooks von EspoCRM für Beteiligte",
"description": "Receives create webhooks from EspoCRM for Beteiligte",
"flows": ["vmh-beteiligte"],
"triggers": [
http("POST", "/vmh/webhook/beteiligte/create")
@@ -32,7 +32,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
if isinstance(payload, list):
@@ -42,9 +42,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Create-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} IDs found for create sync")
-# Emit events für Queue-Processing (Deduplizierung erfolgt im Event-Handler via Lock)
+# Emit events for queue processing (deduplication via lock in event handler)
for entity_id in entity_ids:
await ctx.enqueue({
'topic': 'vmh.beteiligte.create',
@@ -56,11 +56,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ VMH Create Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ VMH Create Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'received',
'action': 'create',
@@ -70,11 +70,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: VMH CREATE WEBHOOK")
ctx.logger.error("ERROR: VMH CREATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={
'error': 'Internal server error',
'details': str(e)

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = {
"name": "VMH Webhook Beteiligte Delete",
"description": "Empfängt Delete-Webhooks von EspoCRM für Beteiligte",
"description": "Receives delete webhooks from EspoCRM for Beteiligte",
"flows": ["vmh-beteiligte"],
"triggers": [
http("POST", "/vmh/webhook/beteiligte/delete")
@@ -29,7 +29,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
if isinstance(payload, list):
@@ -39,9 +39,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Delete-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} IDs found for delete sync")
-# Emit events für Queue-Processing
+# Emit events for queue processing
for entity_id in entity_ids:
await ctx.enqueue({
'topic': 'vmh.beteiligte.delete',
@@ -53,11 +53,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ VMH Delete Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ VMH Delete Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'received',
'action': 'delete',
@@ -67,10 +67,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: BETEILIGTE DELETE WEBHOOK")
ctx.logger.error("ERROR: BETEILIGTE DELETE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={'error': 'Internal server error', 'details': str(e)}
)

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = {
"name": "VMH Webhook Beteiligte Update",
"description": "Empfängt Update-Webhooks von EspoCRM für Beteiligte",
"description": "Receives update webhooks from EspoCRM for Beteiligte",
"flows": ["vmh-beteiligte"],
"triggers": [
http("POST", "/vmh/webhook/beteiligte/update")
@@ -20,8 +20,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
"""
Webhook handler for Beteiligte updates in EspoCRM.
-Note: Loop-Prevention ist auf EspoCRM-Seite implementiert.
-rowId-Updates triggern keine Webhooks mehr, daher keine Filterung nötig.
+Note: Loop prevention is implemented on EspoCRM side.
+rowId updates no longer trigger webhooks, so no filtering needed.
"""
try:
payload = request.body or []
@@ -32,7 +32,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
if isinstance(payload, list):
@@ -42,9 +42,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Update-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} IDs found for update sync")
-# Emit events für Queue-Processing
+# Emit events for queue processing
for entity_id in entity_ids:
await ctx.enqueue({
'topic': 'vmh.beteiligte.update',
@@ -56,11 +56,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ VMH Update Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ VMH Update Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'status': 'received',
'action': 'update',
@@ -70,11 +70,11 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: VMH UPDATE WEBHOOK")
ctx.logger.error("ERROR: VMH UPDATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={
'error': 'Internal server error',
'details': str(e)

View File

@@ -30,7 +30,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info("=" * 80)
ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
entity_type = 'CDokumente' # Default
@@ -45,9 +45,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
entity_ids.add(payload['id'])
entity_type = payload.get('entityType', 'CDokumente')
ctx.logger.info(f"{len(entity_ids)} Document IDs zum Create-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} document IDs found for create sync")
-# Emit events für Queue-Processing (Deduplizierung erfolgt im Event-Handler via Lock)
+# Emit events for queue processing (deduplication via lock in event handler)
for entity_id in entity_ids:
await ctx.enqueue({
'topic': 'vmh.document.create',
@@ -59,27 +59,27 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ Document Create Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ Document Create Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'success': True,
-'message': f'{len(entity_ids)} Document(s) zum Sync enqueued',
+'message': f'{len(entity_ids)} document(s) enqueued for sync',
'entity_ids': list(entity_ids)
}
)
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: DOCUMENT CREATE WEBHOOK")
ctx.logger.error("ERROR: DOCUMENT CREATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Payload: {request.body}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={
'success': False,
'error': str(e)
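The document handlers additionally read an entityType field, defaulting to 'CDokumente'. An invented sample batch as this handler would receive it — only the field names id and entityType appear in the hunks above:

# Invented sample payload; the handler would collect {'doc-001', 'doc-002'}
# and enqueue one 'vmh.document.create' event per ID.
sample_payload = [
    {"id": "doc-001", "entityType": "CDokumente"},
    {"id": "doc-002", "entityType": "CDokumente"},
]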

View File

@@ -30,7 +30,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info("=" * 80)
ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
entity_type = 'CDokumente' # Default
@@ -45,9 +45,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
entity_ids.add(payload['id'])
entity_type = payload.get('entityType', 'CDokumente')
ctx.logger.info(f"{len(entity_ids)} Document IDs zum Delete-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} document IDs found for delete sync")
-# Emit events für Queue-Processing
+# Emit events for queue processing
for entity_id in entity_ids:
await ctx.enqueue({
'topic': 'vmh.document.delete',
@@ -59,27 +59,27 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ Document Delete Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ Document Delete Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'success': True,
-'message': f'{len(entity_ids)} Document(s) zum Delete enqueued',
+'message': f'{len(entity_ids)} document(s) enqueued for deletion',
'entity_ids': list(entity_ids)
}
)
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: DOCUMENT DELETE WEBHOOK")
ctx.logger.error("ERROR: DOCUMENT DELETE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Payload: {request.body}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={
'success': False,
'error': str(e)

View File

@@ -30,7 +30,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
ctx.logger.info("=" * 80)
ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
-# Sammle alle IDs aus dem Batch
+# Collect all IDs from batch
entity_ids = set()
entity_type = 'CDokumente' # Default
@@ -45,9 +45,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
entity_ids.add(payload['id'])
entity_type = payload.get('entityType', 'CDokumente')
ctx.logger.info(f"{len(entity_ids)} Document IDs zum Update-Sync gefunden")
ctx.logger.info(f"{len(entity_ids)} document IDs found for update sync")
-# Emit events für Queue-Processing
+# Emit events for queue processing
for entity_id in entity_ids:
await ctx.enqueue({
'topic': 'vmh.document.update',
@@ -59,27 +59,27 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
}
})
ctx.logger.info("✅ Document Update Webhook verarbeitet: "
f"{len(entity_ids)} Events emittiert")
ctx.logger.info("✅ Document Update Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse(
-status=200,
+status_code=200,
body={
'success': True,
-'message': f'{len(entity_ids)} Document(s) zum Sync enqueued',
+'message': f'{len(entity_ids)} document(s) enqueued for sync',
'entity_ids': list(entity_ids)
}
)
except Exception as e:
ctx.logger.error("=" * 80)
ctx.logger.error("FEHLER: DOCUMENT UPDATE WEBHOOK")
ctx.logger.error("ERROR: DOCUMENT UPDATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Payload: {request.body}")
ctx.logger.error("=" * 80)
return ApiResponse(
-status=500,
+status_code=500,
body={
'success': False,
'error': str(e)