feat: Add Advoware History and Watcher services for document synchronization

- Implement AdvowareHistoryService for fetching and creating history entries.
- Implement AdvowareWatcherService for file operations including listing, downloading, and uploading with Blake3 hash verification.
- Introduce Blake3 utility functions for hash computation and verification.
- Create document sync cron step to poll Redis for pending Aktennummern and emit sync events.
- Develop document sync event handler to manage 3-way merge synchronization for Akten, including metadata updates and error handling.
This commit is contained in:
bsiggel
2026-03-25 21:24:31 +00:00
parent 3c4c1dc852
commit 1ffc37b0b7
11 changed files with 2147 additions and 10 deletions

View File

@@ -0,0 +1,237 @@
"""
Advoware Document Sync - Cron Poller
Polls Redis SET for pending Aktennummern every 10 seconds.
Filters by Akte status and emits sync events.
Flow:
1. SPOP from advoware:pending_aktennummern
2. SADD to advoware:processing_aktennummern
3. Validate Akte status in EspoCRM
4. Emit event if status valid
5. Remove from processing if invalid
"""
from typing import Dict, Any
from motia import FlowContext, cron
# Motia step configuration: a cron trigger fires this step every 10 seconds;
# accepted Aktennummern are forwarded to the 'advoware.document.sync' queue,
# which is consumed by the event-handler step of the same flow.
config = {
"name": "Advoware Document Sync - Cron Poller",
"description": "Poll Redis for pending Aktennummern and emit sync events",
"flows": ["advoware-document-sync"],
"triggers": [cron("*/10 * * * * *")], # Every 10 seconds
"enqueues": ["advoware.document.sync"],
}
async def handler(input_data: None, ctx: FlowContext) -> None:
    """
    Poll Redis for pending Aktennummern and emit document-sync events.

    Flow:
    1. Read (and remove) the oldest debounced entry from the
       'advoware:pending_aktennummern' sorted set (score = enqueue timestamp).
    2. SADD the Aktennummer to 'advoware:processing_aktennummern'.
    3. Validate the Akte's aktivierungsstatus in EspoCRM.
    4. Emit a sync event to 'advoware.document.sync' if the status is valid.
    5. Remove from processing if invalid; move back to pending on error.

    Args:
        input_data: Unused; the cron trigger supplies no payload.
        ctx: Motia flow context providing ``logger`` and ``enqueue``.
    """
    ctx.logger.info("=" * 80)
    ctx.logger.info("🔍 Polling Redis for pending Aktennummern")
    from services.redis_client import get_redis_client
    from services.espocrm import EspoCRMAPI
    redis_client = get_redis_client(strict=False)
    if not redis_client:
        ctx.logger.error("❌ Redis unavailable - cannot poll")
        ctx.logger.info("=" * 80)
        return
    espocrm = EspoCRMAPI(ctx)
    try:
        import time
        # Debounce window: entries younger than this stay queued so that
        # bursts of file events for the same Akte collapse into a single sync.
        debounce_seconds = 10
        cutoff_time = time.time() - debounce_seconds
        # Queue sizes BEFORE polling (sorted set -> ZCARD, plain set -> SCARD).
        pending_count = redis_client.zcard("advoware:pending_aktennummern")
        processing_count = redis_client.scard("advoware:processing_aktennummern")
        ctx.logger.info("📊 Queue Status:")
        ctx.logger.info(f" • Pending: {pending_count} Aktennummern (Sorted Set)")
        ctx.logger.info(f" • Processing: {processing_count} Aktennummern (Set)")
        ctx.logger.info(f" • Debounce: {debounce_seconds} seconds")
        # Fetch at most one entry that has passed the debounce window.
        # NOTE(review): ZRANGEBYSCORE followed by ZREM below is not atomic;
        # with a single cron poller instance this is fine, but multiple poller
        # instances could race on the same entry — confirm the deployment runs
        # exactly one poller (otherwise consider ZPOPMIN or a Lua script).
        old_entries = redis_client.zrangebyscore(
            "advoware:pending_aktennummern",
            min=0,                # oldest possible timestamp
            max=cutoff_time,      # only entries enqueued >= debounce_seconds ago
            start=0,
            num=1                 # only one entry per cron iteration
        )
        if not old_entries:
            # Either the queue is empty OR every entry is still too new (<10s).
            if pending_count > 0:
                ctx.logger.info(f"⏸️ {pending_count} Aktennummern in queue, but all too recent (< {debounce_seconds}s)")
                ctx.logger.info(" Waiting for debounce window to pass...")
            else:
                ctx.logger.info("✓ No pending Aktennummern (queue is empty)")
            ctx.logger.info("=" * 80)
            return
        # Found an Aktennummer that is at least debounce_seconds old.
        aktennr = old_entries[0]
        # redis-py may return bytes depending on decode_responses setting.
        if isinstance(aktennr, bytes):
            aktennr = aktennr.decode('utf-8')
        # Age is derived from the stored enqueue timestamp (the ZSET score).
        score = redis_client.zscore("advoware:pending_aktennummern", aktennr)
        age_seconds = time.time() - score if score else 0
        # Remove the entry from the pending sorted set.
        redis_client.zrem("advoware:pending_aktennummern", aktennr)
        ctx.logger.info("")
        ctx.logger.info(f"📋 Processing Aktennummer: {aktennr}")
        ctx.logger.info(f" ├─ First Event: {age_seconds:.1f} seconds ago")
        ctx.logger.info(f" ├─ Debounced: ✅ (waited {debounce_seconds}s)")
        ctx.logger.info(" └─ Removed from pending queue")
        ctx.logger.info(" ├─ Source: Redis SET 'advoware:pending_aktennummern'")
        ctx.logger.info(" ├─ Action: Moved to 'advoware:processing_aktennummern'")
        ctx.logger.info(" └─ Next: Validate Akte status in EspoCRM")
        # Track in-flight Aktennummern in a plain set; the event handler
        # removes the member on success.
        redis_client.sadd("advoware:processing_aktennummern", aktennr)
        ctx.logger.info("✓ Moved to processing queue")
        # Validate the Akte's status in EspoCRM before emitting a sync event.
        ctx.logger.info("")
        ctx.logger.info("🔍 Looking up Akte in EspoCRM...")
        try:
            # Search for the Akte by its aktennummer.
            result = await espocrm.list_entities(
                'CAdvowareAkten',
                where=[{
                    'type': 'equals',
                    'attribute': 'aktennummer',
                    'value': aktennr
                }],
                max_size=1
            )
            if not result or not result.get('list') or len(result['list']) == 0:
                ctx.logger.warn("")
                ctx.logger.warn(f"⚠️ REJECTED: Akte {aktennr} not found in EspoCRM")
                ctx.logger.warn(f" Reason: No CAdvowareAkten entity with aktennummer={aktennr}")
                ctx.logger.warn(" Action: Removed from processing queue")
                ctx.logger.warn(" Impact: Will not be synced until re-added to Redis")
                redis_client.srem("advoware:processing_aktennummern", aktennr)
                return
            akte = result['list'][0]
            akte_id = akte.get('id', '')
            advoware_id = akte.get('advowareId', 'N/A')
            # Field name is lowercase in EspoCRM!
            aktivierungsstatus = akte.get('aktivierungsstatus', 'N/A')
            ctx.logger.info("✓ Akte found in EspoCRM:")
            ctx.logger.info(f" ├─ EspoCRM ID: {akte_id}")
            ctx.logger.info(f" ├─ Advoware ID: {advoware_id}")
            ctx.logger.info(f" ├─ Aktivierungsstatus RAW: '{aktivierungsstatus}' (type: {type(aktivierungsstatus).__name__})")
            # Debug aid: show a sample of the entity's field names.
            ctx.logger.info(f" └─ All akte fields: {list(akte.keys())[:10]}...")
            # Valid statuses: import, neu, aktiv (compared case-insensitively;
            # EspoCRM delivers lowercase values).
            valid_statuses = ['import', 'neu', 'aktiv']
            aktivierungsstatus_lower = str(aktivierungsstatus).lower().strip()
            ctx.logger.info("🔍 Status validation:")
            ctx.logger.info(f" ├─ Aktivierungsstatus: '{aktivierungsstatus}'")
            ctx.logger.info(f" ├─ Aktivierungsstatus (lowercase): '{aktivierungsstatus_lower}'")
            ctx.logger.info(f" ├─ Valid statuses: {valid_statuses}")
            ctx.logger.info(f" └─ Is valid? {aktivierungsstatus_lower in valid_statuses}")
            if aktivierungsstatus_lower not in valid_statuses:
                ctx.logger.warn("")
                ctx.logger.warn(f"⚠️ REJECTED: Akte {aktennr} has invalid aktivierungsstatus")
                ctx.logger.warn(f" Current Aktivierungsstatus: '{aktivierungsstatus}' (lowercased: '{aktivierungsstatus_lower}')")
                ctx.logger.warn(f" Valid Statuses: {valid_statuses}")
                ctx.logger.warn(" Reason: Only active Akten are synced")
                ctx.logger.warn(" Action: Removed from processing queue")
                redis_client.srem("advoware:processing_aktennummern", aktennr)
                return
            ctx.logger.info("")
            ctx.logger.info(f"✅ ACCEPTED: Akte {aktennr} is valid for sync")
            ctx.logger.info(f" Aktivierungsstatus: {aktivierungsstatus} (valid)")
            ctx.logger.info(" Action: Emitting sync event to queue")
            # Emit the sync event consumed by the event-handler step.
            ctx.logger.info("📤 Emitting event to topic 'advoware.document.sync'...")
            await ctx.enqueue({
                'topic': 'advoware.document.sync',
                'data': {
                    'aktennummer': aktennr,
                    'akte_id': akte_id,
                    'aktivierungsstatus': aktivierungsstatus  # key renamed from legacy 'status'
                }
            })
            ctx.logger.info("✅ Event emitted successfully")
            ctx.logger.info("")
            ctx.logger.info("🚀 Sync event emitted successfully")
            ctx.logger.info(" Topic: advoware.document.sync")
            ctx.logger.info(f" Payload: aktennummer={aktennr}, akte_id={akte_id}, aktivierungsstatus={aktivierungsstatus}")
            ctx.logger.info(" Next: Event handler will process sync")
        except Exception as e:
            ctx.logger.error("")
            ctx.logger.error(f"❌ ERROR: Failed to process {aktennr}")
            ctx.logger.error(f" Error Type: {type(e).__name__}")
            ctx.logger.error(f" Error Message: {str(e)}")
            ctx.logger.error(" Traceback: ", exc_info=True)  # full traceback
            ctx.logger.error(" Action: Moving back to pending queue for retry")
            # Re-queue with the CURRENT timestamp: retries wait out the full
            # debounce window again rather than firing immediately.
            retry_timestamp = time.time()
            redis_client.zadd(
                "advoware:pending_aktennummern",
                {aktennr: retry_timestamp}
            )
            ctx.logger.info(f"✓ Moved {aktennr} back to pending queue (timestamp: now)")
            raise
    except Exception as e:
        ctx.logger.error("")
        ctx.logger.error("❌ CRON POLLER ERROR (non-fatal)")
        ctx.logger.error(f" Error Type: {type(e).__name__}")
        ctx.logger.error(f" Error Message: {str(e)}")
        ctx.logger.error(" Traceback: ", exc_info=True)  # full traceback
        ctx.logger.error(" Impact: This iteration failed, will retry in next cycle")
        # Deliberately not re-raised: the next cron iteration retries.
    finally:
        # Best-effort final queue status; never let the status log itself fail
        # the step (FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit).
        try:
            pending_final = redis_client.zcard("advoware:pending_aktennummern")
            processing_final = redis_client.scard("advoware:processing_aktennummern")
            ctx.logger.info("")
            ctx.logger.info("📊 Final Queue Status:")
            ctx.logger.info(f" • Pending: {pending_final} Aktennummern")
            ctx.logger.info(f" • Processing: {processing_final} Aktennummern")
        except Exception:
            pass
        ctx.logger.info("=" * 80)

View File

@@ -0,0 +1,412 @@
"""
Advoware Document Sync - Event Handler
Executes 3-way merge sync for one Akte.
PER-AKTE LOCK: Allows parallel syncs of different Akten.
Flow:
1. Acquire per-Akte lock (key: advoware_document_sync:akte:{aktennr})
2. Fetch data: EspoCRM docs + Windows files + Advoware history
3. Cleanup file list (filter by History)
4. 3-Way merge per file
5. Sync metadata (always)
6. Check Akte ablage status
7. Update sync status
8. Redis: SREM processing (success) or ZADD to pending Sorted Set (error)
9. Release per-Akte lock (always in finally)
PARALLEL EXECUTION: Multiple Akten can sync simultaneously.
LOCK SCOPE: Only prevents the same Akte from syncing twice at once.
"""
from typing import Dict, Any
from datetime import datetime
from motia import FlowContext, queue
# Motia step configuration: consumes the 'advoware.document.sync' queue that
# the cron-poller step feeds; one event corresponds to one Akte to sync.
config = {
"name": "Advoware Document Sync - Event Handler",
"description": "Execute 3-way merge sync for Akte",
"flows": ["advoware-document-sync"],
"triggers": [queue("advoware.document.sync")],
"enqueues": [],
}
async def handler(event_data: Dict[str, Any], ctx: FlowContext) -> None:
    """
    Execute the 3-way merge document sync for one Akte.

    Flow (FIX: docstring previously described an obsolete GLOBAL lock; the
    code uses a per-Akte lock, as the module docstring states):
    1. Acquire per-Akte lock (key: advoware_document_sync:akte:{aktennr}).
    2. Fetch data: EspoCRM docs + Windows files + Advoware history.
    3. Cleanup file list (filter by History).
    4. 3-way merge per file.
    5. Sync metadata (always).
    6. Check Akte ablage status.
    7. Update sync status.
    8. Redis: SREM processing (success) or ZADD to pending sorted set (error).
    9. Release per-Akte lock (always in finally).

    The lock only prevents the SAME Akte from syncing twice at once;
    different Akten sync in parallel.

    Args:
        event_data: Poller payload with 'aktennummer', 'akte_id' and
            'aktivierungsstatus' (legacy events used the key 'status').
        ctx: Motia flow context providing ``logger``.

    Raises:
        RuntimeError: When the per-Akte lock is already held (Motia retries).
        Exception: Any sync failure is re-raised after re-queueing the Akte.
    """
    aktennummer = event_data.get('aktennummer')
    akte_id = event_data.get('akte_id')
    # FIX: the cron poller emits the key 'aktivierungsstatus'; keep 'status'
    # as a fallback so events queued by older poller versions still resolve.
    status = event_data.get('aktivierungsstatus', event_data.get('status', 'Unknown'))
    ctx.logger.info("=" * 80)
    ctx.logger.info("🔄 DOCUMENT SYNC STARTED")
    ctx.logger.info("=" * 80)
    ctx.logger.info("📋 Akte Details:")
    ctx.logger.info(f" ├─ Aktennummer: {aktennummer}")
    ctx.logger.info(f" ├─ EspoCRM ID: {akte_id}")
    ctx.logger.info(f" ├─ Status: {status}")
    ctx.logger.info(" └─ Triggered: Via cron poller")
    ctx.logger.info("")
    ctx.logger.info("🚀 Parallelization: This Akte syncs independently")
    ctx.logger.info(" Other Akten can sync at the same time!")
    ctx.logger.info("")
    from services.redis_client import get_redis_client
    from services.espocrm import EspoCRMAPI
    from services.advoware_watcher_service import AdvowareWatcherService
    from services.advoware_history_service import AdvowareHistoryService
    from services.advoware_service import AdvowareService
    from services.advoware_document_sync_utils import AdvowareDocumentSyncUtils
    from services.blake3_utils import compute_blake3
    redis_client = get_redis_client(strict=False)
    lock_acquired = False
    lock_key = f"advoware_document_sync:akte:{aktennummer}"  # per-Akte lock
    # FIX: initialize before the try block so the error path below can safely
    # test it — previously `espocrm` was unbound when the lock-busy branch
    # raised before service initialization (NameError silently swallowed).
    espocrm = None
    if not redis_client:
        ctx.logger.error("❌ Redis unavailable, cannot acquire lock")
        return
    try:
        # 1. PER-AKTE LOCK (allows parallel syncs of different Akten).
        # SET NX EX is atomic; TTL guards against a crashed holder.
        ctx.logger.info(f"🔐 Attempting to acquire lock for Akte {aktennummer}...")
        lock_acquired = redis_client.set(lock_key, f"sync_{datetime.now().isoformat()}", nx=True, ex=1800)
        if not lock_acquired:
            current_holder = redis_client.get(lock_key)
            ctx.logger.warn("")
            ctx.logger.warn(f"⏸️ Lock busy for Akte {aktennummer}")
            ctx.logger.warn(f" Lock Key: {lock_key}")
            ctx.logger.warn(f" Current Holder: {current_holder}")
            ctx.logger.warn(" Action: Requeueing (Motia will retry)")
            raise RuntimeError(f"Lock busy for Akte {aktennummer}, retry later")
        ctx.logger.info(f"✅ Lock acquired for Akte {aktennummer}")
        ctx.logger.info(f" Lock Key: {lock_key}")
        ctx.logger.info(" TTL: 30 minutes")
        ctx.logger.info(" Scope: Only this Akte is locked (other Akten can sync in parallel)")
        # 2. Initialize services.
        espocrm = EspoCRMAPI(ctx)
        watcher = AdvowareWatcherService(ctx)
        history_service = AdvowareHistoryService(ctx)
        advoware_service = AdvowareService(ctx)
        sync_utils = AdvowareDocumentSyncUtils(ctx)
        # 3. Fetch data from all three sides of the merge.
        ctx.logger.info("📥 Fetching data...")
        akte = await espocrm.get_entity('CAdvowareAkten', akte_id)
        if not akte:
            ctx.logger.error(f"❌ Akte {akte_id} not found in EspoCRM")
            redis_client.srem("advoware:processing_aktennummern", aktennummer)
            return
        # The Aktennummer IS the Advoware ID.
        advoware_id = aktennummer
        ctx.logger.info(f"📋 Using Aktennummer as Advoware-ID: {advoware_id}")
        # Linked documents from EspoCRM.
        espo_docs_result = await espocrm.list_related(
            'CAdvowareAkten',
            akte_id,
            'dokumentes'
        )
        espo_docs = espo_docs_result.get('list', [])
        # Windows file list (best-effort: sync continues with empty list).
        try:
            windows_files = await watcher.get_akte_files(aktennummer)
        except Exception as e:
            ctx.logger.error(f"❌ Failed to fetch Windows files: {e}")
            windows_files = []
        # Advoware History (best-effort as well).
        try:
            advo_history = await history_service.get_akte_history(advoware_id)
        except Exception as e:
            ctx.logger.error(f"❌ Failed to fetch Advoware History: {e}")
            advo_history = []
        ctx.logger.info("📊 Data fetched:")
        ctx.logger.info(f" - {len(espo_docs)} EspoCRM docs")
        ctx.logger.info(f" - {len(windows_files)} Windows files")
        ctx.logger.info(f" - {len(advo_history)} History entries")
        # 4. Cleanup file list (keep only files that appear in History).
        windows_files = sync_utils.cleanup_file_list(windows_files, advo_history)
        ctx.logger.info(f"🧹 After cleanup: {len(windows_files)} Windows files with History")
        # 5. Build lookup dicts for the 3-way merge. History and Windows both
        # use the full path; EspoCRM docs are matched by (lowercased) name.
        espo_docs_by_name = {doc.get('name', '').lower(): doc for doc in espo_docs}
        windows_files_by_path = {f.get('path', '').lower(): f for f in windows_files}
        history_by_path = {}
        for entry in advo_history:
            datei = entry.get('datei', '')
            if datei:
                history_by_path[datei.lower()] = entry
        # Windows files were already filtered by cleanup, so they define the
        # set of candidate paths.
        all_paths = set(windows_files_by_path.keys())
        ctx.logger.info(f"📋 Total unique files: {len(all_paths)}")
        # 6. 3-way merge per file.
        sync_results = {
            'created': 0,
            'uploaded': 0,
            'updated': 0,
            'skipped': 0,
            'errors': 0
        }
        for file_path in all_paths:
            # Extract the bare filename for display and the EspoCRM lookup.
            filename = file_path.split('\\')[-1]
            ctx.logger.info(f"\n{'='*80}")
            # FIX: log messages below previously contained a literal
            # "(unknown)" placeholder instead of the filename.
            ctx.logger.info(f"Processing: {filename}")
            ctx.logger.info(f"{'='*80}")
            espo_doc = espo_docs_by_name.get(filename.lower())
            windows_file = windows_files_by_path.get(file_path)
            history_entry = history_by_path.get(file_path)
            try:
                # Decide what to do based on all three states.
                action = sync_utils.merge_three_way(espo_doc, windows_file, history_entry)
                ctx.logger.info("📊 Merge decision:")
                ctx.logger.info(f" Action: {action.action}")
                ctx.logger.info(f" Reason: {action.reason}")
                ctx.logger.info(f" Source: {action.source}")
                if action.action == 'SKIP':
                    ctx.logger.info(f"⏭️ Skipping {filename}")
                    sync_results['skipped'] += 1
                elif action.action == 'CREATE':
                    # Download from Windows and create the document in EspoCRM.
                    ctx.logger.info(f"📥 Downloading {filename} from Windows...")
                    content = await watcher.download_file(aktennummer, windows_file.get('relative_path', filename))
                    # Blake3 hash for change detection on later syncs.
                    blake3_hash = compute_blake3(content)
                    import mimetypes
                    mime_type, _ = mimetypes.guess_type(filename)
                    if not mime_type:
                        mime_type = 'application/octet-stream'
                    # Step 1: upload the attachment for the File field.
                    ctx.logger.info("📤 Uploading attachment (Step 1/2)...")
                    try:
                        attachment = await espocrm.upload_attachment_for_file_field(
                            file_content=content,
                            filename=filename,
                            related_type='CDokumente',
                            field='dokument',
                            mime_type=mime_type
                        )
                        ctx.logger.info(f"✅ Attachment uploaded: {attachment.get('id')}")
                    except Exception as e:
                        ctx.logger.error(f"❌ Failed to upload attachment: {e}")
                        raise
                    # Step 2: create the document entity referencing the attachment.
                    ctx.logger.info("💾 Creating document entity (Step 2/2)...")
                    full_path = windows_file.get('path', '')
                    new_doc = await espocrm.create_entity('CDokumente', {
                        'name': filename,
                        'dokumentId': attachment.get('id'),  # link to attachment
                        # Advoware History fields
                        'hnr': history_entry.get('hNr') if history_entry else None,
                        'advowareArt': history_entry.get('art', 'Schreiben') if history_entry else 'Schreiben',
                        'advowareBemerkung': history_entry.get('text', '') if history_entry else '',
                        # Windows file sync fields
                        'dateipfad': full_path,
                        'blake3hash': blake3_hash,
                        'syncedHash': blake3_hash,
                        'usn': windows_file.get('usn', 0),
                        'syncStatus': 'synced'
                    })
                    doc_id = new_doc.get('id')
                    ctx.logger.info(f"✅ Created document with attachment: {doc_id}")
                    # Link the new document to its Akte.
                    await espocrm.link_entities(
                        'CAdvowareAkten',
                        akte_id,
                        'dokumentes',
                        doc_id
                    )
                    sync_results['created'] += 1
                elif action.action == 'UPDATE_ESPO':
                    # Download from Windows and refresh the EspoCRM record.
                    ctx.logger.info(f"📥 Downloading {filename} from Windows...")
                    content = await watcher.download_file(aktennummer, windows_file.get('relative_path', filename))
                    blake3_hash = compute_blake3(content)
                    full_path = windows_file.get('path', '')
                    ctx.logger.info("💾 Updating document in EspoCRM...")
                    update_data = {
                        'blake3hash': blake3_hash,
                        'syncedHash': blake3_hash,
                        'usn': windows_file.get('usn', 0),
                        'dateipfad': full_path,
                        'syncStatus': 'synced'
                    }
                    # Also refresh History-derived fields when available.
                    if history_entry:
                        update_data['hnr'] = history_entry.get('hNr')
                        update_data['advowareArt'] = history_entry.get('art', 'Schreiben')
                        update_data['advowareBemerkung'] = history_entry.get('text', '')
                    await espocrm.update_entity('CDokumente', espo_doc.get('id'), update_data)
                    ctx.logger.info(f"✅ Updated document: {espo_doc.get('id')}")
                    sync_results['updated'] += 1
                elif action.action == 'UPLOAD_WINDOWS':
                    # Upload to Windows from EspoCRM — not implemented yet
                    # (would require downloading the attachment first).
                    ctx.logger.info(f"📤 Uploading {filename} to Windows...")
                    ctx.logger.warn(f"⚠️ Upload to Windows not yet implemented for {filename}")
                    sync_results['skipped'] += 1
            except Exception as e:
                # One failed file must not abort the whole Akte sync.
                ctx.logger.error(f"❌ Error processing {filename}: {e}")
                sync_results['errors'] += 1
        # 7. Sync metadata (always update from History).
        ctx.logger.info(f"\n{'='*80}")
        ctx.logger.info("📋 Syncing metadata from History...")
        ctx.logger.info(f"{'='*80}")
        metadata_updates = 0
        for file_path in all_paths:
            filename = file_path.split('\\')[-1]
            espo_doc = espo_docs_by_name.get(filename.lower())
            history_entry = history_by_path.get(file_path)
            if espo_doc and history_entry:
                needs_update, updates = sync_utils.should_sync_metadata(espo_doc, history_entry)
                if needs_update:
                    try:
                        await espocrm.update_entity('CDokumente', espo_doc.get('id'), updates)
                        ctx.logger.info(f"✅ Updated metadata for {filename}: {list(updates.keys())}")
                        metadata_updates += 1
                    except Exception as e:
                        ctx.logger.error(f"❌ Failed to update metadata for {filename}: {e}")
        ctx.logger.info(f"📊 Metadata sync: {metadata_updates} updates")
        # 8. Check Akte ablage (archived) status in Advoware.
        ctx.logger.info(f"\n{'='*80}")
        ctx.logger.info("🗂️ Checking Akte ablage status...")
        ctx.logger.info(f"{'='*80}")
        akte_details = await advoware_service.get_akte(advoware_id)
        if akte_details and akte_details.get('ablage') == 1:
            ctx.logger.info(f"📁 Akte {aktennummer} marked as ablage, deactivating in EspoCRM")
            # FIX: field name is lowercase 'aktivierungsstatus' (it is read
            # lowercase everywhere else); the capitalized key would have
            # targeted a non-existent attribute.
            await espocrm.update_entity('CAdvowareAkten', akte_id, {
                'aktivierungsstatus': 'Deaktiviert'
            })
        # 9. Mark the Akte as synced.
        await espocrm.update_entity('CAdvowareAkten', akte_id, {
            'syncStatus': 'synced',
            'lastSync': datetime.now().isoformat()
        })
        # 10. SUCCESS: remove from the processing set.
        redis_client.srem("advoware:processing_aktennummern", aktennummer)
        # Summary
        ctx.logger.info(f"\n{'='*80}")
        ctx.logger.info(f"✅ Sync complete for Akte {aktennummer}")
        ctx.logger.info(f"{'='*80}")
        ctx.logger.info("📊 Results:")
        ctx.logger.info(f" - Created: {sync_results['created']}")
        ctx.logger.info(f" - Updated: {sync_results['updated']}")
        ctx.logger.info(f" - Uploaded: {sync_results['uploaded']}")
        ctx.logger.info(f" - Skipped: {sync_results['skipped']}")
        ctx.logger.info(f" - Errors: {sync_results['errors']}")
        ctx.logger.info(f" - Metadata updates: {metadata_updates}")
        ctx.logger.info(f"{'='*80}")
    except Exception as e:
        ctx.logger.error(f"❌ Sync failed for {aktennummer}: {e}")
        # Re-queue for retry via the pending sorted set (score = now).
        if redis_client:
            import time
            retry_timestamp = time.time()
            redis_client.zadd(
                "advoware:pending_aktennummern",
                {aktennummer: retry_timestamp}
            )
            ctx.logger.info(f"✓ Moved {aktennummer} back to pending queue for retry")
        # Best-effort status update in EspoCRM; only possible once the
        # service was initialized (FIX: was a bare `except:` that also hid
        # the unbound-name case).
        if espocrm is not None:
            try:
                await espocrm.update_entity('CAdvowareAkten', akte_id, {
                    'syncStatus': 'failed',
                    'lastSyncError': str(e)[:500]  # truncate long errors
                })
            except Exception:
                pass
        # Re-raise so Motia retries the event.
        raise
    finally:
        # ALWAYS release the per-Akte lock we acquired.
        if lock_acquired and redis_client:
            redis_client.delete(lock_key)
            ctx.logger.info("")
            ctx.logger.info(f"🔓 Lock released for Akte {aktennummer}")
            ctx.logger.info(f" Lock Key: {lock_key}")
            ctx.logger.info(" Duration: Released after processing")
        ctx.logger.info("=" * 80)