Compare commits

...

52 Commits

Author SHA1 Message Date
bsiggel
71f583481a fix: Remove deprecated AI Chat Completions and Models List API implementations 2026-03-19 23:10:00 +00:00
bsiggel
48d440a860 fix: Remove deprecated VMH xAI Chat Completions API implementation 2026-03-19 21:42:43 +00:00
bsiggel
c02a5d8823 fix: Update ExecModule exec path to use correct binary location 2026-03-19 21:23:42 +00:00
bsiggel
edae5f6081 fix: Update ExecModule configuration to use correct source directory for step scripts 2026-03-19 21:20:31 +00:00
bsiggel
8ce843415e feat: Enhance developer guide with updated platform evolution and workflow details 2026-03-19 20:56:32 +00:00
bsiggel
46085bd8dd update to iii 0.90 and change directory structure 2026-03-19 20:33:49 +00:00
bsiggel
2ac83df1e0 fix: Update default chat model to grok-4-1-fast-reasoning and enhance logging for LLM responses 2026-03-19 09:50:31 +00:00
bsiggel
7fffdb2660 fix: Simplify error logging in models list API handler 2026-03-19 09:48:57 +00:00
bsiggel
69f0c6a44d feat: Implement AI Chat Completions API with streaming support and models list endpoint
- Enhanced the AI Chat Completions API to support true streaming using async generators and proper SSE headers.
- Updated endpoint paths to align with OpenAI's API versioning.
- Improved logging for request details and error handling.
- Added a new AI Models List API to return available models compatible with chat completions.
- Refactored code for better readability and maintainability, including the extraction of common functionalities.
- Introduced a VMH-specific Chat Completions API with similar features and structure.
2026-03-18 21:30:59 +00:00
bsiggel
949a5fd69c feat: Implement AI Chat Completions API with support for file search, web search, and Aktenzeichen-based collection lookup 2026-03-18 18:22:04 +00:00
bsiggel
8e53fd6345 fix: Enhance tool binding in LangChainXAIService to support web search and update API handler for new parameters 2026-03-15 16:37:57 +00:00
bsiggel
59fdd7d9ec fix: Normalize MIME type for PDF uploads and update collection management endpoint to use vector store API 2026-03-15 16:34:13 +00:00
bsiggel
eaab14ae57 fix: Adjust multipart form to use raw UTF-8 encoding for filenames in file uploads 2026-03-14 23:00:49 +00:00
bsiggel
331d43390a fix: Import unquote for URL decoding in AI Knowledge synchronization utilities 2026-03-14 22:50:59 +00:00
bsiggel
18f2ff775e fix: URL-decode filenames in document synchronization to handle special characters 2026-03-14 22:49:07 +00:00
bsiggel
c032e24d7a fix: Update default model name to 'grok-4-1-fast-reasoning' in xAI Chat Completions API 2026-03-14 08:39:50 +00:00
bsiggel
4a5065aea4 feat: Add Aktenzeichen utility functions and LangChain xAI service integration
- Implemented utility functions for extracting, validating, and normalizing Aktenzeichen in 'aktenzeichen_utils.py'.
- Created LangChainXAIService for integrating LangChain ChatXAI with file search capabilities in 'langchain_xai_service.py'.
- Developed VMH xAI Chat Completions API to handle OpenAI-compatible requests with support for Aktenzeichen detection and file search in 'xai_chat_completion_api_step.py'.
2026-03-13 10:10:33 +00:00
bsiggel
bb13d59ddb fix: Improve orphan detection and Blake3 hash verification in document synchronization 2026-03-13 08:40:20 +00:00
bsiggel
b0fceef4e2 fix: Update sync mode logging to clarify Blake3 hash verification status 2026-03-12 23:09:21 +00:00
bsiggel
e727582584 fix: Update JunctionData URL construction to use API Gateway instead of direct EspoCRM endpoint 2026-03-12 23:07:33 +00:00
bsiggel
2292fd4762 feat: Enhance document synchronization logic to continue syncing after collection activation 2026-03-12 23:06:40 +00:00
bsiggel
9ada48d8c8 fix: Update collection ID retrieval logic and simplify error logging in AI Knowledge sync event handler 2026-03-12 23:04:01 +00:00
bsiggel
9a3e01d447 fix: Correct logging method from warning to warn for lock acquisition in AI Knowledge sync handler 2026-03-12 23:00:08 +00:00
bsiggel
e945333c1a feat: Update activation status references to 'aktivierungsstatus' for consistency across AI Knowledge sync utilities 2026-03-12 22:53:47 +00:00
bsiggel
6f7f847939 feat: Enhance AI Knowledge Update webhook handler to validate payload structure and handle empty lists 2026-03-12 22:51:44 +00:00
bsiggel
46c0bbf381 feat: Refactor AI Knowledge sync processes to remove full sync parameter and ensure Blake3 verification is always performed 2026-03-12 22:41:19 +00:00
bsiggel
8f1533337c feat: Enhance AI Knowledge sync process with full sync mode and attachment handling 2026-03-12 22:35:48 +00:00
bsiggel
6bf2343a12 feat: Enhance document synchronization by integrating CAIKnowledge handling and improving error logging 2026-03-12 22:30:11 +00:00
bsiggel
8ed7cca432 feat: Add logging utility for calendar sync operations and enhance error handling 2026-03-12 19:26:04 +00:00
bsiggel
9bbfa61b3b feat: Implement AI Knowledge Sync Utilities and Event Handlers
- Added AIKnowledgeActivationStatus and AIKnowledgeSyncStatus enums to models.py for managing activation and sync states.
- Introduced AIKnowledgeSync class in aiknowledge_sync_utils.py for synchronizing CAIKnowledge entities with XAI Collections, including collection lifecycle management, document synchronization, and metadata updates.
- Created a daily cron job (aiknowledge_full_sync_cron_step.py) to perform a full sync of CAIKnowledge entities.
- Developed an event handler (aiknowledge_sync_event_step.py) to synchronize CAIKnowledge entities with XAI Collections triggered by webhooks and cron jobs.
- Implemented a webhook handler (aiknowledge_update_api_step.py) to receive updates from EspoCRM for CAIKnowledge entities and enqueue sync events.
- Enhanced xai_service.py with methods for collection management, document listing, and metadata updates.
2026-03-11 21:14:52 +00:00
bsiggel
a5a122b688 refactor(logging): enhance error handling and resource management in rate limiting and sync operations 2026-03-08 22:47:05 +00:00
bsiggel
6c3cf3ca91 refactor(logging): remove unused logger instances and enhance error logging in webhook steps 2026-03-08 22:21:08 +00:00
bsiggel
1c765d1eec refactor(logging): standardize status code handling and enhance logging in webhook and cron handlers 2026-03-08 22:09:22 +00:00
bsiggel
a0cf845877 Refactor and enhance logging in webhook handlers and Redis client
- Translated comments and docstrings from German to English for better clarity.
- Improved logging consistency across various webhook handlers for create, delete, and update operations.
- Centralized logging functionality by utilizing a dedicated logger utility.
- Added new enums for file and XAI sync statuses in models.
- Updated Redis client factory to use a centralized logger and improved error handling.
- Enhanced API responses to include more descriptive messages and status codes.
2026-03-08 21:50:34 +00:00
bsiggel
f392ec0f06 refactor(typing): update handler signatures to use Dict and Any for improved type hinting 2026-03-08 21:24:12 +00:00
bsiggel
2532bd89ee refactor(logging): standardize logging approach across services and steps 2026-03-08 21:20:49 +00:00
bsiggel
2e449d2928 docs: enhance error handling and locking strategies in document synchronization 2026-03-08 20:58:58 +00:00
bsiggel
fd0196ec31 docs: add guidelines for Steps vs. Utils architecture and decision matrix 2026-03-08 20:30:33 +00:00
bsiggel
d71b5665b6 docs: update Design Principles section with enhanced lock strategy and event handling guidelines 2026-03-08 20:28:57 +00:00
bsiggel
d69801ed97 docs: expand Design Principles section with detailed guidelines and examples for event handling and locking strategies 2026-03-08 20:18:18 +00:00
bsiggel
6e2303c5eb feat(docs): add Design Principles section with Event Storm and Bidirectional Reference patterns 2026-03-08 20:09:32 +00:00
bsiggel
93d4d89531 feat(document-sync): update xaiSyncStatus handling and logging for document synchronization 2026-03-08 19:21:17 +00:00
bsiggel
4ed752b19e feat(document-sync): enhance metadata update to reset file status after preview generation 2026-03-08 18:59:56 +00:00
bsiggel
ba657ecd3b docs: update troubleshooting section with service restart instructions and log checking commands 2026-03-08 18:49:45 +00:00
bsiggel
9e7e163933 docs: add production and manual start instructions for system services 2026-03-08 18:49:08 +00:00
bsiggel
82b48eee8e fix(api-steps): update response status field in document create, delete, and update handlers 2026-03-08 18:48:56 +00:00
bsiggel
7fd6eed86d feat(sync-utils): add logging delegation method to BaseSyncUtils 2026-03-08 18:38:48 +00:00
bsiggel
91ae2947e5 feat(xai-service): implement xAI Files & Collections service for document synchronization 2026-03-08 18:31:29 +00:00
bsiggel
6f7d62293e feat(espocrm): add logging method to EspoCRMAPI for improved message handling
fix(calendar_sync): correct cron expression for calendar sync job
2026-03-08 17:58:10 +00:00
bsiggel
d7b2b5543f feat(espocrm): add caching for entity definitions and implement related entity listing 2026-03-08 17:51:36 +00:00
bsiggel
a53051ea8e feat(api-client): implement session management for AdvowareAPI and EspoCRMAPI 2026-03-03 17:24:35 +00:00
bsiggel
69a48f5f9a Implement central configuration, custom exceptions, logging utilities, Pydantic models, and Redis client for BitByLaw integration
- Added `config.py` for centralized configuration management including Sync, API, Advoware, EspoCRM, Redis, Logging, Calendar Sync, and Feature Flags.
- Created `exceptions.py` with a hierarchy of custom exceptions for integration errors, API errors, sync errors, and Redis errors.
- Developed `logging_utils.py` for a unified logging wrapper supporting structured logging and performance tracking.
- Defined Pydantic models in `models.py` for data validation of Advoware and EspoCRM entities, including sync operation models.
- Introduced `redis_client.py` for a centralized Redis client factory with connection pooling, automatic reconnection, and health checks.
2026-03-03 17:18:49 +00:00
58 changed files with 8474 additions and 999 deletions

382
REFACTORING_SUMMARY.md Normal file
View File

@@ -0,0 +1,382 @@
# Code Refactoring - Verbesserungen Übersicht
Datum: 3. März 2026
## Zusammenfassung
Umfassendes Refactoring zur Verbesserung von Robustheit, Eleganz und Effizienz des BitByLaw Integration Codes.
## Implementierte Verbesserungen
### 1. ✅ Custom Exception Classes ([services/exceptions.py](services/exceptions.py))
**Problem:** Zu generisches Exception Handling mit `except Exception`
**Lösung:** Hierarchische Exception-Struktur:
```python
from services.exceptions import (
AdvowareAPIError,
AdvowareAuthError,
AdvowareTimeoutError,
EspoCRMAPIError,
EspoCRMAuthError,
RetryableError,
NonRetryableError,
LockAcquisitionError,
ValidationError
)
# Verwendung:
try:
result = await advoware.api_call(...)
except AdvowareTimeoutError:
# Spezifisch für Timeouts
raise RetryableError()
except AdvowareAuthError:
# Auth-Fehler nicht retryable
raise
except AdvowareAPIError as e:
# Andere API-Fehler
if is_retryable(e):
# Retry logic
```
**Vorteile:**
- Präzise Fehlerbehandlung
- Besseres Error Tracking
- Automatische Retry-Klassifizierung mit `is_retryable()`
---
### 2. ✅ Redis Client Factory ([services/redis_client.py](services/redis_client.py))
**Problem:** Duplizierte Redis-Initialisierung in 4+ Dateien
**Lösung:** Zentralisierte Redis Client Factory mit Singleton Pattern:
```python
from services.redis_client import get_redis_client, is_redis_available
# Strict mode: Exception bei Fehler
redis_client = get_redis_client(strict=True)
# Optional mode: None bei Fehler (für optionale Features)
redis_client = get_redis_client(strict=False)
# Health Check
if is_redis_available():
# Redis verfügbar
```
**Vorteile:**
- DRY (Don't Repeat Yourself)
- Connection Pooling
- Zentrale Konfiguration
- Health Checks
---
### 3. ✅ Pydantic Models für Validation ([services/models.py](services/models.py))
**Problem:** Keine Datenvalidierung, unsichere Typen
**Lösung:** Pydantic Models mit automatischer Validierung:
```python
from services.models import (
AdvowareBeteiligteCreate,
EspoCRMBeteiligteCreate,
validate_beteiligte_advoware
)
# Automatische Validierung:
try:
validated = AdvowareBeteiligteCreate.model_validate(data)
except ValidationError as e:
# Handle validation errors
# Helper:
validated = validate_beteiligte_advoware(data)
```
**Features:**
- Type Safety
- Automatische Validierung (Geburtsdatum, Name, etc.)
- Enums für Status/Rechtsformen
- Field Validators
---
### 4. ✅ Zentrale Konfiguration ([services/config.py](services/config.py))
**Problem:** Magic Numbers und Strings überall im Code
**Lösung:** Zentrale Config mit Dataclasses:
```python
from services.config import (
SYNC_CONFIG,
API_CONFIG,
ADVOWARE_CONFIG,
ESPOCRM_CONFIG,
FEATURE_FLAGS,
get_retry_delay_seconds,
get_lock_key
)
# Verwendung:
max_retries = SYNC_CONFIG.max_retries # 5
lock_ttl = SYNC_CONFIG.lock_ttl_seconds # 900
backoff = SYNC_CONFIG.retry_backoff_minutes # [1, 5, 15, 60, 240]
# Helper Functions:
lock_key = get_lock_key('cbeteiligte', entity_id)
retry_delay = get_retry_delay_seconds(attempt=2) # 15 * 60 seconds
```
**Konfigurationsbereiche:**
- `SYNC_CONFIG` - Retry, Locking, Change Detection
- `API_CONFIG` - Timeouts, Rate Limiting
- `ADVOWARE_CONFIG` - Token, Auth, Read-only Fields
- `ESPOCRM_CONFIG` - Pagination, Notifications
- `FEATURE_FLAGS` - Feature Toggles
---
### 5. ✅ Konsistentes Logging ([services/logging_utils.py](services/logging_utils.py))
**Problem:** Inkonsistentes Logging (3 verschiedene Patterns)
**Lösung:** Unified Logger mit Context-Support:
```python
from services.logging_utils import get_logger, get_service_logger
# Service Logger:
logger = get_service_logger('advoware', context)
logger.info("Message", entity_id="123")
# Mit Context Manager für Timing:
with logger.operation('sync_entity', entity_id='123'):
# Do work
pass # Automatisches Timing und Error Logging
# API Call Tracking:
with logger.api_call('/api/v1/Beteiligte', method='POST'):
result = await api.post(...)
```
**Features:**
- Motia FlowContext Support
- Structured Logging
- Automatisches Performance Tracking
- Context Fields
---
### 6. ✅ Spezifische Exceptions in Services
**Aktualisierte Services:**
- [advoware.py](services/advoware.py) - AdvowareAPIError, AdvowareAuthError, AdvowareTimeoutError
- [espocrm.py](services/espocrm.py) - EspoCRMAPIError, EspoCRMAuthError, EspoCRMTimeoutError
- [sync_utils_base.py](services/sync_utils_base.py) - LockAcquisitionError
- [beteiligte_sync_utils.py](services/beteiligte_sync_utils.py) - SyncError
**Beispiel:**
```python
# Vorher:
except Exception as e:
logger.error(f"Error: {e}")
# Nachher:
except AdvowareTimeoutError:
raise RetryableError("Request timed out")
except AdvowareAuthError:
raise # Nicht retryable
except AdvowareAPIError as e:
if is_retryable(e):
# Retry
```
---
### 7. ✅ Type Hints ergänzt
**Verbesserte Type Hints in:**
- Service-Methoden (advoware.py, espocrm.py)
- Mapper-Funktionen (espocrm_mapper.py)
- Utility-Klassen (sync_utils_base.py, beteiligte_sync_utils.py)
- Step Handler
**Beispiel:**
```python
# Vorher:
async def handler(event_data, ctx):
...
# Nachher:
async def handler(
event_data: Dict[str, Any],
ctx: FlowContext[Any]
) -> Optional[Dict[str, Any]]:
...
```
---
## Migration Guide
### Für bestehenden Code
1. **Exception Handling aktualisieren:**
```python
# Alt:
try:
result = await api.call()
except Exception as e:
logger.error(f"Error: {e}")
# Neu:
try:
result = await api.call()
except AdvowareTimeoutError:
# Spezifisch behandeln
raise RetryableError()
except AdvowareAPIError as e:
logger.error(f"API Error: {e}")
if is_retryable(e):
# Retry
```
2. **Redis initialisieren:**
```python
# Alt:
redis_client = redis.Redis(host=..., port=...)
# Neu:
from services.redis_client import get_redis_client
redis_client = get_redis_client(strict=False)
```
3. **Konstanten verwenden:**
```python
# Alt:
MAX_RETRIES = 5
LOCK_TTL = 900
# Neu:
from services.config import SYNC_CONFIG
max_retries = SYNC_CONFIG.max_retries
lock_ttl = SYNC_CONFIG.lock_ttl_seconds
```
4. **Logging standardisieren:**
```python
# Alt:
logger = logging.getLogger(__name__)
logger.info("Message")
# Neu:
from services.logging_utils import get_service_logger
logger = get_service_logger('my_service', context)
logger.info("Message", entity_id="123")
```
---
## Performance-Verbesserungen
- ✅ Redis Connection Pooling (max 50 Connections)
- ✅ Token Caching optimiert
- ✅ Bessere Error Classification (weniger unnötige Retries)
- ⚠️ Noch TODO: Batch Operations für parallele Syncs
---
## Feature Flags
Neue Features können über `FEATURE_FLAGS` gesteuert werden:
```python
from services.config import FEATURE_FLAGS
# Aktivieren/Deaktivieren:
FEATURE_FLAGS.strict_validation = True # Pydantic Validation
FEATURE_FLAGS.kommunikation_sync_enabled = False # Noch in Entwicklung
FEATURE_FLAGS.parallel_sync_enabled = False # Experimentell
```
---
## Testing
**Unit Tests sollten nun leichter sein:**
```python
# Mock Redis:
from services.redis_client import RedisClientFactory
RedisClientFactory._instance = mock_redis
# Mock Exceptions:
from services.exceptions import AdvowareAPIError
raise AdvowareAPIError("Test error", status_code=500)
# Validate Models:
from services.models import validate_beteiligte_advoware
with pytest.raises(ValidationError):
validate_beteiligte_advoware(invalid_data)
```
---
## Nächste Schritte
1. **Unit Tests schreiben** (min. 60% Coverage)
- Exception Handling Tests
- Mapper Tests mit Pydantic
- Redis Factory Tests
2. **Batch Operations** implementieren
- Parallele API-Calls
- Bulk Updates
3. **Monitoring** verbessern
- Performance Metrics aus Logger nutzen
- Redis Health Checks
4. **Dokumentation** erweitern
- API-Docs generieren (Sphinx)
- Error Handling Guide
---
## Breaking Changes
⚠️ **Minimale Breaking Changes:**
1. Import-Pfade haben sich geändert:
- `AdvowareTokenError` → `AdvowareAuthError`
- `EspoCRMError` → `EspoCRMAPIError`
2. Redis wird jetzt über Factory bezogen:
- Statt direktem `redis.Redis()` → `get_redis_client()`
**Migration ist einfach:** Imports aktualisieren, Code läuft sonst identisch.
---
## Autoren
- Code Refactoring: GitHub Copilot
- Review: BitByLaw Team
- Datum: 3. März 2026
---
## Fragen?
Bei Fragen zum Refactoring siehe:
- [services/README.md](services/README.md) - Service-Layer Dokumentation
- [exceptions.py](services/exceptions.py) - Exception Hierarchie
- [config.py](services/config.py) - Alle Konfigurationsoptionen

599
docs/AI_KNOWLEDGE_SYNC.md Normal file
View File

@@ -0,0 +1,599 @@
# AI Knowledge Collection Sync - Dokumentation
**Version**: 1.0
**Datum**: 11. März 2026
**Status**: ✅ Implementiert
---
## Überblick
Synchronisiert EspoCRM `CAIKnowledge` Entities mit XAI Collections für semantische Dokumentensuche. Unterstützt vollständigen Collection-Lifecycle, BLAKE3-basierte Integritätsprüfung und robustes Hash-basiertes Change Detection.
## Features
**Collection Lifecycle Management**
- NEW → Collection erstellen in XAI
- ACTIVE → Automatischer Sync der Dokumente
- PAUSED → Sync pausiert, Collection bleibt
- DEACTIVATED → Collection aus XAI löschen
**Dual-Hash Change Detection**
- EspoCRM Hash (MD5/SHA256) für lokale Änderungserkennung
- XAI BLAKE3 Hash für Remote-Integritätsverifikation
- Metadata-Hash für Beschreibungs-Änderungen
**Robustheit**
- BLAKE3 Verification nach jedem Upload
- Metadata-Only Updates via PATCH
- Orphan Detection & Cleanup
- Distributed Locking (Redis)
- Daily Full Sync (02:00 Uhr nachts)
**Fehlerbehandlung**
- Unsupported MIME Types → Status "unsupported"
- Transient Errors → Retry mit Exponential Backoff
- Partial Failures toleriert
---
## Architektur
```
┌─────────────────────────────────────────────────────────────────┐
│ EspoCRM CAIKnowledge │
│ ├─ activationStatus: new/active/paused/deactivated │
│ ├─ syncStatus: unclean/pending_sync/synced/failed │
│ └─ datenbankId: XAI Collection ID │
└─────────────────────────────────────────────────────────────────┘
↓ Webhook
┌─────────────────────────────────────────────────────────────────┐
│ Motia Webhook Handler │
│ → POST /vmh/webhook/aiknowledge/update │
└─────────────────────────────────────────────────────────────────┘
↓ Emit Event
┌─────────────────────────────────────────────────────────────────┐
│ Queue: aiknowledge.sync │
└─────────────────────────────────────────────────────────────────┘
↓ Lock: aiknowledge:{id}
┌─────────────────────────────────────────────────────────────────┐
│ Sync Handler │
│ ├─ Check activationStatus │
│ ├─ Manage Collection Lifecycle │
│ ├─ Sync Documents (with BLAKE3 verification) │
│ └─ Update Statuses │
└─────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────┐
│ XAI Collections API │
│ └─ Collections with embedded documents │
└─────────────────────────────────────────────────────────────────┘
```
---
## EspoCRM Konfiguration
### 1. Entity: CAIKnowledge
**Felder:**
| Feld | Typ | Beschreibung | Werte |
|------|-----|--------------|-------|
| `name` | varchar(255) | Name der Knowledge Base | - |
| `datenbankId` | varchar(255) | XAI Collection ID | Automatisch gefüllt |
| `activationStatus` | enum | Lifecycle-Status | new, active, paused, deactivated |
| `syncStatus` | enum | Sync-Status | unclean, pending_sync, synced, failed |
| `lastSync` | datetime | Letzter erfolgreicher Sync | ISO 8601 |
| `syncError` | text | Fehlermeldung bei Failure | Max 2000 Zeichen |
**Enum-Definitionen:**
```json
{
"activationStatus": {
"type": "enum",
"options": ["new", "active", "paused", "deactivated"],
"default": "new"
},
"syncStatus": {
"type": "enum",
"options": ["unclean", "pending_sync", "synced", "failed"],
"default": "unclean"
}
}
```
### 2. Junction: CAIKnowledgeCDokumente
**additionalColumns:**
| Feld | Typ | Beschreibung |
|------|-----|--------------|
| `aiDocumentId` | varchar(255) | XAI file_id |
| `syncstatus` | enum | Per-Document Sync-Status |
| `syncedHash` | varchar(64) | MD5/SHA256 von EspoCRM |
| `xaiBlake3Hash` | varchar(128) | BLAKE3 Hash von XAI |
| `syncedMetadataHash` | varchar(64) | Hash der Metadaten |
| `lastSync` | datetime | Letzter Sync dieses Dokuments |
**Enum-Definition:**
```json
{
"syncstatus": {
"type": "enum",
"options": ["new", "unclean", "synced", "failed", "unsupported"]
}
}
```
### 3. Webhooks
**Webhook 1: CREATE**
```json
{
"event": "CAIKnowledge.afterSave",
"url": "https://your-motia-domain.com/vmh/webhook/aiknowledge/update",
"method": "POST",
"payload": "{\"entity_id\": \"{$id}\", \"entity_type\": \"CAIKnowledge\", \"action\": \"create\"}",
"condition": "entity.isNew()"
}
```
**Webhook 2: UPDATE**
```json
{
"event": "CAIKnowledge.afterSave",
"url": "https://your-motia-domain.com/vmh/webhook/aiknowledge/update",
"method": "POST",
"payload": "{\"entity_id\": \"{$id}\", \"entity_type\": \"CAIKnowledge\", \"action\": \"update\"}",
"condition": "!entity.isNew()"
}
```
**Webhook 3: DELETE (Optional)**
```json
{
"event": "CAIKnowledge.afterRemove",
"url": "https://your-motia-domain.com/vmh/webhook/aiknowledge/delete",
"method": "POST",
"payload": "{\"entity_id\": \"{$id}\", \"entity_type\": \"CAIKnowledge\", \"action\": \"delete\"}"
}
```
**Empfehlung**: Nur CREATE + UPDATE verwenden. DELETE über `activationStatus="deactivated"` steuern.
### 4. Hooks (EspoCRM Backend)
**Hook 1: Document Link → syncStatus auf "unclean"**
```php
// Hooks/Custom/CAIKnowledge/AfterRelateLinkMultiple.php
namespace Espo\Custom\Hooks\CAIKnowledge;
class AfterRelateLinkMultiple extends \Espo\Core\Hooks\Base
{
public function afterRelateLinkMultiple($entity, $options, $data)
{
if ($data['link'] === 'dokumentes') {
// Mark as unclean when documents linked
$entity->set('syncStatus', 'unclean');
$this->getEntityManager()->saveEntity($entity);
}
}
}
```
**Hook 2: Document Change → Junction auf "unclean"**
```php
// Hooks/Custom/CDokumente/AfterSave.php
namespace Espo\Custom\Hooks\CDokumente;
class AfterSave extends \Espo\Core\Hooks\Base
{
public function afterSave($entity, $options)
{
if ($entity->isAttributeChanged('description') ||
$entity->isAttributeChanged('md5') ||
$entity->isAttributeChanged('sha256')) {
// Mark all junction entries as unclean
$this->updateJunctionStatuses($entity->id, 'unclean');
// Mark all related CAIKnowledge as unclean
$this->markRelatedKnowledgeUnclean($entity->id);
}
}
}
```
---
## Environment Variables
```bash
# XAI API Keys (erforderlich)
XAI_API_KEY=your_xai_api_key_here
XAI_MANAGEMENT_KEY=your_xai_management_key_here
# Redis (für Locking)
REDIS_HOST=localhost
REDIS_PORT=6379
# EspoCRM
ESPOCRM_API_BASE_URL=https://crm.bitbylaw.com/api/v1
ESPOCRM_API_KEY=your_espocrm_api_key
```
---
## Workflows
### Workflow 1: Neue Knowledge Base erstellen
```
1. User erstellt CAIKnowledge in EspoCRM
└─ activationStatus: "new" (default)
2. Webhook CREATE gefeuert
└─ Event: aiknowledge.sync
3. Sync Handler:
└─ activationStatus="new" → Collection erstellen in XAI
└─ Update EspoCRM:
├─ datenbankId = collection_id
├─ activationStatus = "active"
└─ syncStatus = "unclean"
4. Nächster Webhook (UPDATE):
└─ activationStatus="active" → Dokumente syncen
```
### Workflow 2: Dokumente hinzufügen
```
1. User verknüpft Dokumente mit CAIKnowledge
└─ EspoCRM Hook setzt syncStatus = "unclean"
2. Webhook UPDATE gefeuert
└─ Event: aiknowledge.sync
3. Sync Handler:
└─ Für jedes Junction-Entry:
├─ Check: MIME Type supported?
├─ Check: Hash changed?
├─ Download von EspoCRM
├─ Upload zu XAI mit Metadata
├─ Verify Upload (BLAKE3)
└─ Update Junction: syncstatus="synced"
4. Update CAIKnowledge:
└─ syncStatus = "synced"
└─ lastSync = now()
```
### Workflow 3: Metadata-Änderung
```
1. User ändert Document.description in EspoCRM
└─ EspoCRM Hook setzt Junction syncstatus = "unclean"
└─ EspoCRM Hook setzt CAIKnowledge syncStatus = "unclean"
2. Webhook UPDATE gefeuert
3. Sync Handler:
└─ Berechne Metadata-Hash
└─ Hash unterschiedlich? → PATCH zu XAI
└─ Falls PATCH fehlschlägt → Fallback: Re-upload
└─ Update Junction: syncedMetadataHash
```
### Workflow 4: Knowledge Base deaktivieren
```
1. User setzt activationStatus = "deactivated"
2. Webhook UPDATE gefeuert
3. Sync Handler:
└─ Collection aus XAI löschen
└─ Alle Junction Entries zurücksetzen:
├─ syncstatus = "new"
└─ aiDocumentId = NULL
└─ CAIKnowledge bleibt in EspoCRM (mit datenbankId)
```
### Workflow 5: Daily Full Sync
```
Cron: Täglich um 02:00 Uhr
1. Lade alle CAIKnowledge mit:
└─ activationStatus = "active"
└─ syncStatus IN ("unclean", "failed")
2. Für jedes:
└─ Emit: aiknowledge.sync Event
3. Queue verarbeitet alle sequenziell
└─ Fängt verpasste Webhooks ab
```
---
## Monitoring & Troubleshooting
### Logs prüfen
```bash
# Motia Service Logs
sudo journalctl -u motia-iii -f | grep -i "ai knowledge"
# Letzte 100 Sync-Events
sudo journalctl -u motia-iii -n 100 | grep "AI KNOWLEDGE SYNC"
# Fehler der letzten 24 Stunden
sudo journalctl -u motia-iii --since "24 hours ago" | grep "❌"
```
### EspoCRM Status prüfen
```sql
-- Alle Knowledge Bases mit Status
SELECT
id,
name,
activation_status,
sync_status,
last_sync,
sync_error
FROM c_ai_knowledge
WHERE activation_status = 'active';
-- Junction Entries mit Sync-Problemen
SELECT
j.id,
k.name AS knowledge_name,
d.name AS document_name,
j.syncstatus,
j.last_sync
FROM c_ai_knowledge_c_dokumente j
JOIN c_ai_knowledge k ON j.c_ai_knowledge_id = k.id
JOIN c_dokumente d ON j.c_dokumente_id = d.id
WHERE j.syncstatus IN ('failed', 'unsupported');
```
### Häufige Probleme
#### Problem: "Lock busy for aiknowledge:xyz"
**Ursache**: Vorheriger Sync noch aktiv oder abgestürzt
**Lösung**:
```bash
# Redis lock manuell freigeben
redis-cli
> DEL sync_lock:aiknowledge:xyz
```
#### Problem: "Unsupported MIME type"
**Ursache**: Document hat MIME Type, den XAI nicht unterstützt
**Lösung**:
- Dokument konvertieren (z.B. RTF → PDF)
- Oder: Akzeptieren (bleibt mit Status "unsupported")
#### Problem: "Upload verification failed"
**Ursache**: XAI liefert kein BLAKE3 Hash oder Hash-Mismatch
**Lösung**:
1. Prüfe XAI API Dokumentation (Hash-Format geändert?)
2. Falls temporär: Retry läuft automatisch
3. Falls persistent: XAI Support kontaktieren
#### Problem: "Collection not found"
**Ursache**: Collection wurde manuell in XAI gelöscht
**Lösung**: Automatisch gelöst - Sync erstellt neue Collection
---
## API Endpoints
### Webhook Endpoint
```http
POST /vmh/webhook/aiknowledge/update
Content-Type: application/json
{
"entity_id": "kb-123",
"entity_type": "CAIKnowledge",
"action": "update"
}
```
**Response:**
```json
{
"success": true,
"knowledge_id": "kb-123"
}
```
---
## Performance
### Typische Sync-Zeiten
| Szenario | Zeit | Notizen |
|----------|------|---------|
| Collection erstellen | < 1s | Nur API Call |
| 1 Dokument (1 MB) | 2-4s | Upload + Verify |
| 10 Dokumente (10 MB) | 20-40s | Sequenziell |
| 100 Dokumente (100 MB) | 3-6 min | Lock TTL: 30 min |
| Metadata-only Update | < 1s | Nur PATCH |
| Orphan Cleanup | 1-3s | Pro 10 Dokumente |
### Lock TTLs
- **AIKnowledge Sync**: 30 Minuten (1800 Sekunden)
- **Redis Lock**: Same as above
- **Auto-Release**: Bei Timeout (TTL expired)
### Rate Limits
**XAI API:**
- Files Upload: ~100 requests/minute
- Management API: ~1000 requests/minute
**Strategie bei Rate Limit (429)**:
- Exponential Backoff: 2s, 4s, 8s, 16s, 32s
- Respect `Retry-After` Header
- Max 5 Retries
---
## XAI Collections Metadata
### Document Metadata Fields
Werden für jedes Dokument in XAI gespeichert:
```json
{
"fields": {
"document_name": "Vertrag.pdf",
"description": "Mietvertrag Mustermann",
"created_at": "2024-01-01T00:00:00Z",
"modified_at": "2026-03-10T15:30:00Z",
"espocrm_id": "dok-123"
}
}
```
**inject_into_chunk**: `true` für `document_name` und `description`
→ Verbessert semantische Suche
### Collection Metadata
```json
{
"metadata": {
"espocrm_entity_type": "CAIKnowledge",
"espocrm_entity_id": "kb-123",
"created_at": "2026-03-11T10:00:00Z"
}
}
```
---
## Testing
### Manueller Test
```bash
# 1. Erstelle CAIKnowledge in EspoCRM
# 2. Prüfe Logs
sudo journalctl -u motia-iii -f
# 3. Prüfe Redis Lock
redis-cli
> KEYS sync_lock:aiknowledge:*
# 4. Prüfe XAI Collection
curl -H "Authorization: Bearer $XAI_MANAGEMENT_KEY" \
https://management-api.x.ai/v1/collections
```
### Integration Test
```python
# tests/test_aiknowledge_sync.py
async def test_full_sync_workflow():
"""Test complete sync workflow"""
# 1. Create CAIKnowledge with status "new"
knowledge = await espocrm.create_entity('CAIKnowledge', {
'name': 'Test KB',
'activationStatus': 'new'
})
# 2. Trigger webhook
await trigger_webhook(knowledge['id'])
# 3. Wait for sync
await asyncio.sleep(5)
# 4. Check collection created
knowledge = await espocrm.get_entity('CAIKnowledge', knowledge['id'])
assert knowledge['datenbankId'] is not None
assert knowledge['activationStatus'] == 'active'
# 5. Link document
await espocrm.link_entities('CAIKnowledge', knowledge['id'], 'CDokumente', doc_id)
# 6. Trigger webhook again
await trigger_webhook(knowledge['id'])
await asyncio.sleep(10)
# 7. Check junction synced
junction = await espocrm.get_junction_entries(
'CAIKnowledgeCDokumente',
'cAIKnowledgeId',
knowledge['id']
)
assert junction[0]['syncstatus'] == 'synced'
assert junction[0]['xaiBlake3Hash'] is not None
```
---
## Maintenance
### Wöchentliche Checks
- [ ] Prüfe failed Syncs in EspoCRM
- [ ] Prüfe Redis Memory Usage
- [ ] Prüfe XAI Storage Usage
- [ ] Review Logs für Patterns
### Monatliche Tasks
- [ ] Cleanup alte syncError Messages
- [ ] Verify XAI Collection Integrity
- [ ] Review Performance Metrics
- [ ] Update MIME Type Support List
---
## Support
**Bei Problemen:**
1. **Logs prüfen**: `journalctl -u motia-iii -f`
2. **EspoCRM Status prüfen**: SQL Queries (siehe oben)
3. **Redis Locks prüfen**: `redis-cli KEYS sync_lock:*`
4. **XAI API Status**: https://status.x.ai
**Kontakt:**
- Team: BitByLaw Development
- Motia Docs: `/opt/motia-iii/bitbylaw/docs/INDEX.md`
---
**Version History:**
- **1.0** (11.03.2026) - Initial Release
- Collection Lifecycle Management
- BLAKE3 Hash Verification
- Daily Full Sync
- Metadata Change Detection

File diff suppressed because it is too large Load Diff

View File

@@ -78,6 +78,6 @@ modules:
- class: modules::shell::ExecModule - class: modules::shell::ExecModule
config: config:
watch: watch:
- steps/**/*.py - src/steps/**/*.py
exec: exec:
- /opt/bin/uv run python -m motia.cli run --dir steps - /usr/local/bin/uv run python -m motia.cli run --dir src/steps

View File

@@ -18,5 +18,8 @@ dependencies = [
"google-api-python-client>=2.100.0", # Google Calendar API "google-api-python-client>=2.100.0", # Google Calendar API
"google-auth>=2.23.0", # Google OAuth2 "google-auth>=2.23.0", # Google OAuth2
"backoff>=2.2.1", # Retry/backoff decorator "backoff>=2.2.1", # Retry/backoff decorator
"langchain>=0.3.0", # LangChain framework
"langchain-xai>=0.2.0", # xAI integration for LangChain
"langchain-core>=0.3.0", # LangChain core
] ]

View File

@@ -7,9 +7,6 @@ Basierend auf ADRESSEN_SYNC_ANALYSE.md Abschnitt 12.
from typing import Dict, Any, Optional from typing import Dict, Any, Optional
from datetime import datetime from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class AdressenMapper: class AdressenMapper:

View File

@@ -26,8 +26,6 @@ from services.espocrm import EspoCRMAPI
from services.adressen_mapper import AdressenMapper from services.adressen_mapper import AdressenMapper
from services.notification_utils import NotificationManager from services.notification_utils import NotificationManager
logger = logging.getLogger(__name__)
class AdressenSync: class AdressenSync:
"""Sync-Klasse für Adressen zwischen EspoCRM und Advoware""" """Sync-Klasse für Adressen zwischen EspoCRM und Advoware"""

View File

@@ -8,16 +8,17 @@ import hashlib
import base64 import base64
import os import os
import datetime import datetime
import redis
import logging
from typing import Optional, Dict, Any from typing import Optional, Dict, Any
logger = logging.getLogger(__name__) from services.exceptions import (
AdvowareAPIError,
AdvowareAuthError,
class AdvowareTokenError(Exception): AdvowareTimeoutError,
"""Raised when token acquisition fails""" RetryableError
pass )
from services.redis_client import get_redis_client
from services.config import ADVOWARE_CONFIG, API_CONFIG
from services.logging_utils import get_service_logger
class AdvowareAPI: class AdvowareAPI:
@@ -34,15 +35,8 @@ class AdvowareAPI:
- ADVOWARE_USER - ADVOWARE_USER
- ADVOWARE_ROLE - ADVOWARE_ROLE
- ADVOWARE_PASSWORD - ADVOWARE_PASSWORD
- REDIS_HOST (optional, default: localhost)
- REDIS_PORT (optional, default: 6379)
- REDIS_DB_ADVOWARE_CACHE (optional, default: 1)
""" """
AUTH_URL = "https://security.advo-net.net/api/v1/Token"
TOKEN_CACHE_KEY = 'advoware_access_token'
TOKEN_TIMESTAMP_CACHE_KEY = 'advoware_token_timestamp'
def __init__(self, context=None): def __init__(self, context=None):
""" """
Initialize Advoware API client. Initialize Advoware API client.
@@ -51,7 +45,8 @@ class AdvowareAPI:
context: Motia FlowContext for logging (optional) context: Motia FlowContext for logging (optional)
""" """
self.context = context self.context = context
self._log("AdvowareAPI initializing", level='debug') self.logger = get_service_logger('advoware', context)
self.logger.debug("AdvowareAPI initializing")
# Load configuration from environment # Load configuration from environment
self.API_BASE_URL = os.getenv('ADVOWARE_API_BASE_URL', 'https://www2.advo-net.net:90/') self.API_BASE_URL = os.getenv('ADVOWARE_API_BASE_URL', 'https://www2.advo-net.net:90/')
@@ -63,30 +58,33 @@ class AdvowareAPI:
self.user = os.getenv('ADVOWARE_USER', '') self.user = os.getenv('ADVOWARE_USER', '')
self.role = int(os.getenv('ADVOWARE_ROLE', '2')) self.role = int(os.getenv('ADVOWARE_ROLE', '2'))
self.password = os.getenv('ADVOWARE_PASSWORD', '') self.password = os.getenv('ADVOWARE_PASSWORD', '')
self.token_lifetime_minutes = int(os.getenv('ADVOWARE_TOKEN_LIFETIME_MINUTES', '55')) self.token_lifetime_minutes = ADVOWARE_CONFIG.token_lifetime_minutes
self.api_timeout_seconds = int(os.getenv('ADVOWARE_API_TIMEOUT_SECONDS', '30')) self.api_timeout_seconds = API_CONFIG.default_timeout_seconds
# Initialize Redis for token caching # Initialize Redis for token caching (centralized)
try: self.redis_client = get_redis_client(strict=False)
redis_host = os.getenv('REDIS_HOST', 'localhost') if self.redis_client:
redis_port = int(os.getenv('REDIS_PORT', '6379')) self.logger.info("Connected to Redis for token caching")
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1')) else:
redis_timeout = int(os.getenv('REDIS_TIMEOUT_SECONDS', '5')) self.logger.warning("⚠️ Redis unavailable - token caching disabled!")
self.redis_client = redis.Redis( self.logger.info("AdvowareAPI initialized")
host=redis_host,
port=redis_port,
db=redis_db,
socket_timeout=redis_timeout,
socket_connect_timeout=redis_timeout
)
self.redis_client.ping()
self._log("Connected to Redis for token caching")
except (redis.exceptions.ConnectionError, Exception) as e:
self._log(f"Could not connect to Redis: {e}. Token caching disabled.", level='warning')
self.redis_client = None
self._log("AdvowareAPI initialized") self._session: Optional[aiohttp.ClientSession] = None
def _log(self, message: str, level: str = 'info') -> None:
"""Internal logging helper"""
log_func = getattr(self.logger, level, self.logger.info)
log_func(message)
async def _get_session(self) -> aiohttp.ClientSession:
if self._session is None or self._session.closed:
self._session = aiohttp.ClientSession()
return self._session
async def close(self) -> None:
if self._session and not self._session.closed:
await self._session.close()
def _generate_hmac(self, request_time_stamp: str, nonce: Optional[str] = None) -> str: def _generate_hmac(self, request_time_stamp: str, nonce: Optional[str] = None) -> str:
"""Generate HMAC-SHA512 signature for authentication""" """Generate HMAC-SHA512 signature for authentication"""
@@ -97,7 +95,7 @@ class AdvowareAPI:
try: try:
api_key_bytes = base64.b64decode(self.api_key) api_key_bytes = base64.b64decode(self.api_key)
logger.debug("API Key decoded from base64") self.logger.debug("API Key decoded from base64")
except Exception as e: except Exception as e:
self._log(f"API Key not base64-encoded, using as-is: {e}", level='debug') self._log(f"API Key not base64-encoded, using as-is: {e}", level='debug')
api_key_bytes = self.api_key.encode('utf-8') if isinstance(self.api_key, str) else self.api_key api_key_bytes = self.api_key.encode('utf-8') if isinstance(self.api_key, str) else self.api_key
@@ -105,9 +103,9 @@ class AdvowareAPI:
signature = hmac.new(api_key_bytes, message, hashlib.sha512) signature = hmac.new(api_key_bytes, message, hashlib.sha512)
return base64.b64encode(signature.digest()).decode('utf-8') return base64.b64encode(signature.digest()).decode('utf-8')
def _fetch_new_access_token(self) -> str: async def _fetch_new_access_token(self) -> str:
"""Fetch new access token from Advoware Auth API""" """Fetch new access token from Advoware Auth API (async)"""
self._log("Fetching new access token from Advoware") self.logger.info("Fetching new access token from Advoware")
nonce = str(uuid.uuid4()) nonce = str(uuid.uuid4())
request_time_stamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + "Z" request_time_stamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + "Z"
@@ -127,39 +125,61 @@ class AdvowareAPI:
"RequestTimeStamp": request_time_stamp "RequestTimeStamp": request_time_stamp
} }
self._log(f"Token request: AppID={self.app_id}, User={self.user}", level='debug') self.logger.debug(f"Token request: AppID={self.app_id}, User={self.user}")
# Using synchronous requests for token fetch (called from sync context) # Async token fetch using aiohttp
import requests session = await self._get_session()
response = requests.post(
self.AUTH_URL,
json=data,
headers=headers,
timeout=self.api_timeout_seconds
)
self._log(f"Token response status: {response.status_code}") try:
response.raise_for_status() async with session.post(
ADVOWARE_CONFIG.auth_url,
json=data,
headers=headers,
timeout=aiohttp.ClientTimeout(total=self.api_timeout_seconds)
) as response:
self.logger.debug(f"Token response status: {response.status}")
if response.status == 401:
raise AdvowareAuthError(
"Authentication failed - check credentials",
status_code=401
)
if response.status >= 400:
error_text = await response.text()
raise AdvowareAPIError(
f"Token request failed ({response.status}): {error_text}",
status_code=response.status
)
result = await response.json()
except asyncio.TimeoutError:
raise AdvowareTimeoutError(
"Token request timed out",
status_code=408
)
except aiohttp.ClientError as e:
raise AdvowareAPIError(f"Token request failed: {str(e)}")
result = response.json()
access_token = result.get("access_token") access_token = result.get("access_token")
if not access_token: if not access_token:
self._log("No access_token in response", level='error') self.logger.error("No access_token in response")
raise AdvowareTokenError("No access_token received from Advoware") raise AdvowareAuthError("No access_token received from Advoware")
self._log("Access token fetched successfully") self.logger.info("Access token fetched successfully")
# Cache token in Redis # Cache token in Redis
if self.redis_client: if self.redis_client:
effective_ttl = max(1, (self.token_lifetime_minutes - 2) * 60) effective_ttl = max(1, (self.token_lifetime_minutes - 2) * 60)
self.redis_client.set(self.TOKEN_CACHE_KEY, access_token, ex=effective_ttl) self.redis_client.set(ADVOWARE_CONFIG.token_cache_key, access_token, ex=effective_ttl)
self.redis_client.set(self.TOKEN_TIMESTAMP_CACHE_KEY, str(time.time()), ex=effective_ttl) self.redis_client.set(ADVOWARE_CONFIG.token_timestamp_key, str(time.time()), ex=effective_ttl)
self._log(f"Token cached in Redis with TTL {effective_ttl}s") self.logger.debug(f"Token cached in Redis with TTL {effective_ttl}s")
return access_token return access_token
def get_access_token(self, force_refresh: bool = False) -> str: async def get_access_token(self, force_refresh: bool = False) -> str:
""" """
Get valid access token (from cache or fetch new). Get valid access token (from cache or fetch new).
@@ -169,33 +189,34 @@ class AdvowareAPI:
Returns: Returns:
Valid access token Valid access token
""" """
self._log("Getting access token", level='debug') self.logger.debug("Getting access token")
if not self.redis_client: if not self.redis_client:
self._log("No Redis available, fetching new token") self.logger.info("No Redis available, fetching new token")
return self._fetch_new_access_token() return await self._fetch_new_access_token()
if force_refresh: if force_refresh:
self._log("Force refresh requested, fetching new token") self.logger.info("Force refresh requested, fetching new token")
return self._fetch_new_access_token() return await self._fetch_new_access_token()
# Check cache # Check cache
cached_token = self.redis_client.get(self.TOKEN_CACHE_KEY) cached_token = self.redis_client.get(ADVOWARE_CONFIG.token_cache_key)
token_timestamp = self.redis_client.get(self.TOKEN_TIMESTAMP_CACHE_KEY) token_timestamp = self.redis_client.get(ADVOWARE_CONFIG.token_timestamp_key)
if cached_token and token_timestamp: if cached_token and token_timestamp:
try: try:
timestamp = float(token_timestamp.decode('utf-8')) # Redis decode_responses=True returns strings
timestamp = float(token_timestamp)
age_seconds = time.time() - timestamp age_seconds = time.time() - timestamp
if age_seconds < (self.token_lifetime_minutes - 1) * 60: if age_seconds < (self.token_lifetime_minutes - 1) * 60:
self._log(f"Using cached token (age: {age_seconds:.0f}s)", level='debug') self.logger.debug(f"Using cached token (age: {age_seconds:.0f}s)")
return cached_token.decode('utf-8') return cached_token
except (ValueError, AttributeError) as e: except (ValueError, AttributeError, TypeError) as e:
self._log(f"Error reading cached token: {e}", level='debug') self.logger.debug(f"Error reading cached token: {e}")
self._log("Cached token expired or invalid, fetching new") self.logger.info("Cached token expired or invalid, fetching new")
return self._fetch_new_access_token() return await self._fetch_new_access_token()
async def api_call( async def api_call(
self, self,
@@ -223,6 +244,11 @@ class AdvowareAPI:
Returns: Returns:
JSON response or None JSON response or None
Raises:
AdvowareAuthError: Authentication failed
AdvowareTimeoutError: Request timed out
AdvowareAPIError: Other API errors
""" """
# Clean endpoint # Clean endpoint
endpoint = endpoint.lstrip('/') endpoint = endpoint.lstrip('/')
@@ -233,7 +259,12 @@ class AdvowareAPI:
) )
# Get auth token # Get auth token
token = self.get_access_token() try:
token = await self.get_access_token()
except AdvowareAuthError:
raise
except Exception as e:
raise AdvowareAPIError(f"Failed to get access token: {str(e)}")
# Prepare headers # Prepare headers
effective_headers = headers.copy() if headers else {} effective_headers = headers.copy() if headers else {}
@@ -243,21 +274,21 @@ class AdvowareAPI:
# Use 'data' parameter if provided, otherwise 'json_data' # Use 'data' parameter if provided, otherwise 'json_data'
json_payload = data if data is not None else json_data json_payload = data if data is not None else json_data
async with aiohttp.ClientSession(timeout=effective_timeout) as session: session = await self._get_session()
try: try:
self._log(f"API call: {method} {url}", level='debug') with self.logger.api_call(endpoint, method):
async with session.request( async with session.request(
method, method,
url, url,
headers=effective_headers, headers=effective_headers,
params=params, params=params,
json=json_payload json=json_payload,
timeout=effective_timeout
) as response: ) as response:
# Handle 401 - retry with fresh token # Handle 401 - retry with fresh token
if response.status == 401: if response.status == 401:
self._log("401 Unauthorized, refreshing token") self.logger.warning("401 Unauthorized, refreshing token")
token = self.get_access_token(force_refresh=True) token = await self.get_access_token(force_refresh=True)
effective_headers['Authorization'] = f'Bearer {token}' effective_headers['Authorization'] = f'Bearer {token}'
async with session.request( async with session.request(
@@ -265,17 +296,57 @@ class AdvowareAPI:
url, url,
headers=effective_headers, headers=effective_headers,
params=params, params=params,
json=json_payload json=json_payload,
timeout=effective_timeout
) as retry_response: ) as retry_response:
if retry_response.status == 401:
raise AdvowareAuthError(
"Authentication failed even after token refresh",
status_code=401
)
if retry_response.status >= 500:
error_text = await retry_response.text()
raise RetryableError(
f"Server error {retry_response.status}: {error_text}"
)
retry_response.raise_for_status() retry_response.raise_for_status()
return await self._parse_response(retry_response) return await self._parse_response(retry_response)
response.raise_for_status() # Handle other error codes
if response.status == 404:
error_text = await response.text()
raise AdvowareAPIError(
f"Resource not found: {endpoint}",
status_code=404,
response_body=error_text
)
if response.status >= 500:
error_text = await response.text()
raise RetryableError(
f"Server error {response.status}: {error_text}"
)
if response.status >= 400:
error_text = await response.text()
raise AdvowareAPIError(
f"API error {response.status}: {error_text}",
status_code=response.status,
response_body=error_text
)
return await self._parse_response(response) return await self._parse_response(response)
except aiohttp.ClientError as e: except asyncio.TimeoutError:
self._log(f"API call failed: {e}", level='error') raise AdvowareTimeoutError(
raise f"Request timed out after {effective_timeout.total}s",
status_code=408
)
except aiohttp.ClientError as e:
self.logger.error(f"API call failed: {e}")
raise AdvowareAPIError(f"Request failed: {str(e)}")
async def _parse_response(self, response: aiohttp.ClientResponse) -> Any: async def _parse_response(self, response: aiohttp.ClientResponse) -> Any:
"""Parse API response""" """Parse API response"""
@@ -283,27 +354,6 @@ class AdvowareAPI:
try: try:
return await response.json() return await response.json()
except Exception as e: except Exception as e:
self._log(f"JSON parse error: {e}", level='debug') self.logger.debug(f"JSON parse error: {e}")
return None return None
return None return None
def _log(self, message: str, level: str = 'info'):
"""Log message via context or standard logger"""
if self.context:
if level == 'debug':
self.context.logger.debug(message)
elif level == 'warning':
self.context.logger.warning(message)
elif level == 'error':
self.context.logger.error(message)
else:
self.context.logger.info(message)
else:
if level == 'debug':
logger.debug(message)
elif level == 'warning':
logger.warning(message)
elif level == 'error':
logger.error(message)
else:
logger.info(message)

View File

@@ -1,24 +1,29 @@
""" """
Advoware Service Wrapper Advoware Service Wrapper
Erweitert AdvowareAPI mit höheren Operations
Extends AdvowareAPI with higher-level operations for business logic.
""" """
import logging
from typing import Dict, Any, Optional from typing import Dict, Any, Optional
from services.advoware import AdvowareAPI from services.advoware import AdvowareAPI
from services.logging_utils import get_service_logger
logger = logging.getLogger(__name__)
class AdvowareService: class AdvowareService:
""" """
Service-Layer für Advoware Operations Service layer for Advoware operations.
Verwendet AdvowareAPI für API-Calls Uses AdvowareAPI for API calls.
""" """
def __init__(self, context=None): def __init__(self, context=None):
self.api = AdvowareAPI(context) self.api = AdvowareAPI(context)
self.context = context self.context = context
self.logger = get_service_logger('advoware_service', context)
def _log(self, message: str, level: str = 'info') -> None:
"""Internal logging helper"""
log_func = getattr(self.logger, level, self.logger.info)
log_func(message)
async def api_call(self, *args, **kwargs): async def api_call(self, *args, **kwargs):
"""Delegate api_call to underlying AdvowareAPI""" """Delegate api_call to underlying AdvowareAPI"""
@@ -26,29 +31,29 @@ class AdvowareService:
# ========== BETEILIGTE ========== # ========== BETEILIGTE ==========
async def get_beteiligter(self, betnr: int) -> Optional[Dict]: async def get_beteiligter(self, betnr: int) -> Optional[Dict[str, Any]]:
""" """
Lädt Beteiligten mit allen Daten Load Beteiligte with all data.
Returns: Returns:
Beteiligte-Objekt Beteiligte object or None
""" """
try: try:
endpoint = f"api/v1/advonet/Beteiligte/{betnr}" endpoint = f"api/v1/advonet/Beteiligte/{betnr}"
result = await self.api.api_call(endpoint, method='GET') result = await self.api.api_call(endpoint, method='GET')
return result return result
except Exception as e: except Exception as e:
logger.error(f"[ADVO] Fehler beim Laden von Beteiligte {betnr}: {e}", exc_info=True) self._log(f"[ADVO] Error loading Beteiligte {betnr}: {e}", level='error')
return None return None
# ========== KOMMUNIKATION ========== # ========== KOMMUNIKATION ==========
async def create_kommunikation(self, betnr: int, data: Dict[str, Any]) -> Optional[Dict]: async def create_kommunikation(self, betnr: int, data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
""" """
Erstellt neue Kommunikation Create new Kommunikation.
Args: Args:
betnr: Beteiligten-Nummer betnr: Beteiligte number
data: { data: {
'tlf': str, # Required 'tlf': str, # Required
'bemerkung': str, # Optional 'bemerkung': str, # Optional
@@ -57,68 +62,68 @@ class AdvowareService:
} }
Returns: Returns:
Neue Kommunikation mit 'id' New Kommunikation with 'id' or None
""" """
try: try:
endpoint = f"api/v1/advonet/Beteiligte/{betnr}/Kommunikationen" endpoint = f"api/v1/advonet/Beteiligte/{betnr}/Kommunikationen"
result = await self.api.api_call(endpoint, method='POST', json_data=data) result = await self.api.api_call(endpoint, method='POST', json_data=data)
if result: if result:
logger.info(f"[ADVO] ✅ Created Kommunikation: betnr={betnr}, kommKz={data.get('kommKz')}") self._log(f"[ADVO] ✅ Created Kommunikation: betnr={betnr}, kommKz={data.get('kommKz')}")
return result return result
except Exception as e: except Exception as e:
logger.error(f"[ADVO] Fehler beim Erstellen von Kommunikation: {e}", exc_info=True) self._log(f"[ADVO] Error creating Kommunikation: {e}", level='error')
return None return None
async def update_kommunikation(self, betnr: int, komm_id: int, data: Dict[str, Any]) -> bool: async def update_kommunikation(self, betnr: int, komm_id: int, data: Dict[str, Any]) -> bool:
""" """
Aktualisiert bestehende Kommunikation Update existing Kommunikation.
Args: Args:
betnr: Beteiligten-Nummer betnr: Beteiligte number
komm_id: Kommunikation-ID komm_id: Kommunikation ID
data: { data: {
'tlf': str, # Optional 'tlf': str, # Optional
'bemerkung': str, # Optional 'bemerkung': str, # Optional
'online': bool # Optional 'online': bool # Optional
} }
NOTE: kommKz ist READ-ONLY und kann nicht geändert werden NOTE: kommKz is READ-ONLY and cannot be changed
Returns: Returns:
True wenn erfolgreich True if successful
""" """
try: try:
endpoint = f"api/v1/advonet/Beteiligte/{betnr}/Kommunikationen/{komm_id}" endpoint = f"api/v1/advonet/Beteiligte/{betnr}/Kommunikationen/{komm_id}"
await self.api.api_call(endpoint, method='PUT', json_data=data) await self.api.api_call(endpoint, method='PUT', json_data=data)
logger.info(f"[ADVO] ✅ Updated Kommunikation: betnr={betnr}, komm_id={komm_id}") self._log(f"[ADVO] ✅ Updated Kommunikation: betnr={betnr}, komm_id={komm_id}")
return True return True
except Exception as e: except Exception as e:
logger.error(f"[ADVO] Fehler beim Update von Kommunikation: {e}", exc_info=True) self._log(f"[ADVO] Error updating Kommunikation: {e}", level='error')
return False return False
async def delete_kommunikation(self, betnr: int, komm_id: int) -> bool: async def delete_kommunikation(self, betnr: int, komm_id: int) -> bool:
""" """
Löscht Kommunikation (aktuell 403 Forbidden) Delete Kommunikation (currently returns 403 Forbidden).
NOTE: DELETE ist in Advoware API deaktiviert NOTE: DELETE is disabled in Advoware API.
Verwende stattdessen: Leere Slots mit empty_slot_marker Use empty slots with empty_slot_marker instead.
Returns: Returns:
True wenn erfolgreich True if successful
""" """
try: try:
endpoint = f"api/v1/advonet/Beteiligte/{betnr}/Kommunikationen/{komm_id}" endpoint = f"api/v1/advonet/Beteiligte/{betnr}/Kommunikationen/{komm_id}"
await self.api.api_call(endpoint, method='DELETE') await self.api.api_call(endpoint, method='DELETE')
logger.info(f"[ADVO] ✅ Deleted Kommunikation: betnr={betnr}, komm_id={komm_id}") self._log(f"[ADVO] ✅ Deleted Kommunikation: betnr={betnr}, komm_id={komm_id}")
return True return True
except Exception as e: except Exception as e:
# Expected: 403 Forbidden # Expected: 403 Forbidden
logger.warning(f"[ADVO] DELETE not allowed (expected): {e}") self._log(f"[ADVO] DELETE not allowed (expected): {e}", level='warning')
return False return False

View File

@@ -0,0 +1,545 @@
"""
AI Knowledge Sync Utilities
Utility functions for synchronizing CAIKnowledge entities with XAI Collections:
- Collection lifecycle management (create, delete)
- Document synchronization with BLAKE3 hash verification
- Metadata-only updates via PATCH
- Orphan detection and cleanup
"""
import hashlib
import json
from typing import Dict, Any, Optional, List, Tuple
from datetime import datetime
from urllib.parse import unquote
from services.sync_utils_base import BaseSyncUtils
from services.models import (
AIKnowledgeActivationStatus,
AIKnowledgeSyncStatus,
JunctionSyncStatus
)
class AIKnowledgeSync(BaseSyncUtils):
"""Utility class for AI Knowledge ↔ XAI Collections synchronization"""
def _get_lock_key(self, entity_id: str) -> str:
"""Redis lock key for AI Knowledge entities"""
return f"sync_lock:aiknowledge:{entity_id}"
async def acquire_sync_lock(self, knowledge_id: str) -> bool:
"""
Acquire distributed lock via Redis + update EspoCRM syncStatus.
Args:
knowledge_id: CAIKnowledge entity ID
Returns:
True if lock acquired, False if already locked
"""
try:
# STEP 1: Atomic Redis lock
lock_key = self._get_lock_key(knowledge_id)
if not self._acquire_redis_lock(lock_key):
self._log(f"Redis lock already active for {knowledge_id}", level='warn')
return False
# STEP 2: Update syncStatus to pending_sync
try:
await self.espocrm.update_entity('CAIKnowledge', knowledge_id, {
'syncStatus': AIKnowledgeSyncStatus.PENDING_SYNC.value
})
except Exception as e:
self._log(f"Could not set syncStatus: {e}", level='debug')
self._log(f"Sync lock acquired for {knowledge_id}")
return True
except Exception as e:
self._log(f"Error acquiring lock: {e}", level='error')
# Clean up Redis lock on error
lock_key = self._get_lock_key(knowledge_id)
self._release_redis_lock(lock_key)
return False
async def release_sync_lock(
self,
knowledge_id: str,
success: bool = True,
error_message: Optional[str] = None
) -> None:
"""
Release sync lock and set final status.
Args:
knowledge_id: CAIKnowledge entity ID
success: Whether sync succeeded
error_message: Optional error message
"""
try:
update_data = {
'syncStatus': AIKnowledgeSyncStatus.SYNCED.value if success else AIKnowledgeSyncStatus.FAILED.value
}
if success:
update_data['lastSync'] = datetime.now().isoformat()
update_data['syncError'] = None
elif error_message:
update_data['syncError'] = error_message[:2000]
await self.espocrm.update_entity('CAIKnowledge', knowledge_id, update_data)
self._log(f"Sync lock released: {knowledge_id}{'success' if success else 'failed'}")
# Release Redis lock
lock_key = self._get_lock_key(knowledge_id)
self._release_redis_lock(lock_key)
except Exception as e:
self._log(f"Error releasing lock: {e}", level='error')
# Ensure Redis lock is released
lock_key = self._get_lock_key(knowledge_id)
self._release_redis_lock(lock_key)
async def sync_knowledge_to_xai(self, knowledge_id: str, ctx) -> None:
"""
Main sync orchestrator with activation status handling.
Args:
knowledge_id: CAIKnowledge entity ID
ctx: Motia context for logging
"""
from services.espocrm import EspoCRMAPI
from services.xai_service import XAIService
espocrm = EspoCRMAPI(ctx)
xai = XAIService(ctx)
try:
# 1. Load knowledge entity
knowledge = await espocrm.get_entity('CAIKnowledge', knowledge_id)
activation_status = knowledge.get('aktivierungsstatus')
collection_id = knowledge.get('datenbankId')
ctx.logger.info("=" * 80)
ctx.logger.info(f"📋 Processing: {knowledge['name']}")
ctx.logger.info(f" aktivierungsstatus: {activation_status}")
ctx.logger.info(f" datenbankId: {collection_id or 'NONE'}")
ctx.logger.info("=" * 80)
# ═══════════════════════════════════════════════════════════
# CASE 1: NEW → Create Collection
# ═══════════════════════════════════════════════════════════
if activation_status == AIKnowledgeActivationStatus.NEW.value:
ctx.logger.info("🆕 Status 'new' → Creating XAI Collection")
collection = await xai.create_collection(
name=knowledge['name'],
metadata={
'espocrm_entity_type': 'CAIKnowledge',
'espocrm_entity_id': knowledge_id,
'created_at': datetime.now().isoformat()
}
)
# XAI API returns 'collection_id' not 'id'
collection_id = collection.get('collection_id') or collection.get('id')
# Update EspoCRM: Set datenbankId + change status to 'active'
await espocrm.update_entity('CAIKnowledge', knowledge_id, {
'datenbankId': collection_id,
'aktivierungsstatus': AIKnowledgeActivationStatus.ACTIVE.value,
'syncStatus': AIKnowledgeSyncStatus.UNCLEAN.value
})
ctx.logger.info(f"✅ Collection created: {collection_id}")
ctx.logger.info(" Status changed to 'active', now syncing documents...")
# Continue to document sync immediately (don't return)
# Fall through to sync logic below
# ═══════════════════════════════════════════════════════════
# CASE 2: DEACTIVATED → Delete Collection from XAI
# ═══════════════════════════════════════════════════════════
elif activation_status == AIKnowledgeActivationStatus.DEACTIVATED.value:
ctx.logger.info("🗑️ Status 'deactivated' → Deleting XAI Collection")
if collection_id:
try:
await xai.delete_collection(collection_id)
ctx.logger.info(f"✅ Collection deleted from XAI: {collection_id}")
except Exception as e:
ctx.logger.error(f"❌ Failed to delete collection: {e}")
else:
ctx.logger.info("⏭️ No collection ID, nothing to delete")
# Reset junction entries
documents = await espocrm.get_knowledge_documents_with_junction(knowledge_id)
for doc in documents:
doc_id = doc['documentId']
try:
await espocrm.update_knowledge_document_junction(
knowledge_id,
doc_id,
{
'syncstatus': 'new',
'aiDocumentId': None
},
update_last_sync=False
)
except Exception as e:
ctx.logger.warn(f"⚠️ Failed to reset junction for {doc_id}: {e}")
ctx.logger.info(f"✅ Deactivation complete, {len(documents)} junction entries reset")
return
# ═══════════════════════════════════════════════════════════
# CASE 3: PAUSED → Skip Sync
# ═══════════════════════════════════════════════════════════
elif activation_status == AIKnowledgeActivationStatus.PAUSED.value:
ctx.logger.info("⏸️ Status 'paused' → No sync performed")
return
# ═══════════════════════════════════════════════════════════
# CASE 4: ACTIVE → Normal Sync (or just created from NEW)
# ═══════════════════════════════════════════════════════════
if activation_status in (AIKnowledgeActivationStatus.ACTIVE.value, AIKnowledgeActivationStatus.NEW.value):
if not collection_id:
ctx.logger.error("❌ Status 'active' but no datenbankId!")
raise RuntimeError("Active knowledge without collection ID")
if activation_status == AIKnowledgeActivationStatus.ACTIVE.value:
ctx.logger.info(f"🔄 Status 'active' → Syncing documents to {collection_id}")
# Verify collection exists
collection = await xai.get_collection(collection_id)
if not collection:
ctx.logger.warn(f"⚠️ Collection {collection_id} not found, recreating")
collection = await xai.create_collection(
name=knowledge['name'],
metadata={
'espocrm_entity_type': 'CAIKnowledge',
'espocrm_entity_id': knowledge_id
}
)
collection_id = collection['id']
await espocrm.update_entity('CAIKnowledge', knowledge_id, {
'datenbankId': collection_id
})
# Sync documents (both for ACTIVE status and after NEW → ACTIVE transition)
await self._sync_knowledge_documents(knowledge_id, collection_id, ctx)
elif activation_status not in (AIKnowledgeActivationStatus.DEACTIVATED.value, AIKnowledgeActivationStatus.PAUSED.value):
ctx.logger.error(f"❌ Unknown aktivierungsstatus: {activation_status}")
raise ValueError(f"Invalid aktivierungsstatus: {activation_status}")
finally:
await xai.close()
    async def _sync_knowledge_documents(
        self,
        knowledge_id: str,
        collection_id: str,
        ctx
    ) -> None:
        """
        Sync all documents of a knowledge base to XAI collection.

        Uses efficient JunctionData endpoint to get all documents with junction data
        and blake3 hashes in a single API call. Hash comparison is always performed.

        Side effects:
            - Uploads new/changed files to XAI and adds them to the collection.
            - Updates the EspoCRM junction record per document
              (aiDocumentId, syncstatus, optionally the last-sync timestamp).
            - Removes files from the XAI collection that were not touched by
              this sync run (orphan cleanup, best-effort).

        Error handling: per-document failures are caught, counted, and marked
        'failed' on the junction record; this method does not raise for them.

        Args:
            knowledge_id: CAIKnowledge entity ID
            collection_id: XAI Collection ID
            ctx: Motia context (provides ctx.logger)
        """
        from services.espocrm import EspoCRMAPI
        from services.xai_service import XAIService
        espocrm = EspoCRMAPI(ctx)
        xai = XAIService(ctx)
        # ═══════════════════════════════════════════════════════════════
        # STEP 1: Load all documents with junction data (single API call)
        # ═══════════════════════════════════════════════════════════════
        ctx.logger.info(f"📥 Loading documents with junction data for knowledge {knowledge_id}")
        documents = await espocrm.get_knowledge_documents_with_junction(knowledge_id)
        ctx.logger.info(f"📊 Found {len(documents)} document(s)")
        if not documents:
            ctx.logger.info("✅ No documents to sync")
            return
        # ═══════════════════════════════════════════════════════════════
        # STEP 2: Sync each document based on status/hash
        # ═══════════════════════════════════════════════════════════════
        successful = 0
        failed = 0
        skipped = 0
        # Track aiDocumentIds for orphan detection (collected during sync)
        synced_file_ids: set = set()
        for doc in documents:
            # Junction fields used below: documentId/documentName identify the
            # CDokumente entity; syncstatus, aiDocumentId and blake3hash come
            # from the junction row and drive the sync decision.
            doc_id = doc['documentId']
            doc_name = doc.get('documentName', 'Unknown')
            junction_status = doc.get('syncstatus', 'new')
            ai_document_id = doc.get('aiDocumentId')
            blake3_hash = doc.get('blake3hash')
            ctx.logger.info(f"\n📄 {doc_name} (ID: {doc_id})")
            ctx.logger.info(f" Status: {junction_status}")
            ctx.logger.info(f" aiDocumentId: {ai_document_id or 'N/A'}")
            ctx.logger.info(f" blake3hash: {blake3_hash[:16] if blake3_hash else 'N/A'}...")
            try:
                # Decide if sync needed
                needs_sync = False
                reason = ""
                # 'new'/'unclean'/'failed' always force a (re-)upload.
                if junction_status in ['new', 'unclean', 'failed']:
                    needs_sync = True
                    reason = f"status={junction_status}"
                elif junction_status == 'synced':
                    # Synced status should have both blake3_hash and ai_document_id
                    if not blake3_hash:
                        needs_sync = True
                        reason = "inconsistency: synced but no blake3 hash"
                        ctx.logger.warn(f" ⚠️ Synced document missing blake3 hash!")
                    elif not ai_document_id:
                        needs_sync = True
                        reason = "inconsistency: synced but no aiDocumentId"
                        ctx.logger.warn(f" ⚠️ Synced document missing aiDocumentId!")
                    else:
                        # Verify Blake3 hash with XAI (always, since hash from JunctionData API is free)
                        try:
                            xai_doc_info = await xai.get_collection_document(collection_id, ai_document_id)
                            if xai_doc_info:
                                xai_blake3 = xai_doc_info.get('blake3_hash')
                                if xai_blake3 != blake3_hash:
                                    needs_sync = True
                                    reason = f"blake3 mismatch (XAI: {xai_blake3[:16] if xai_blake3 else 'N/A'}... vs EspoCRM: {blake3_hash[:16]}...)"
                                    ctx.logger.info(f" 🔄 Blake3 mismatch detected!")
                                else:
                                    ctx.logger.info(f" ✅ Blake3 hash matches")
                            else:
                                needs_sync = True
                                reason = "file not found in XAI collection"
                                ctx.logger.warn(f" ⚠️ Document marked synced but not in XAI!")
                        except Exception as e:
                            # Fail open: if verification is impossible, re-upload
                            # rather than risk serving a stale file.
                            needs_sync = True
                            reason = f"verification failed: {e}"
                            ctx.logger.warn(f" ⚠️ Failed to verify Blake3, will re-sync: {e}")
                if not needs_sync:
                    ctx.logger.info(f" ⏭️ Skipped (no sync needed)")
                    # Document is already synced, track its aiDocumentId
                    if ai_document_id:
                        synced_file_ids.add(ai_document_id)
                    skipped += 1
                    continue
                ctx.logger.info(f" 🔄 Syncing: {reason}")
                # Get complete document entity with attachment info
                doc_entity = await espocrm.get_entity('CDokumente', doc_id)
                # 'dokumentId' on CDokumente holds the EspoCRM Attachment id
                # (used below to fetch the Attachment entity and download the file).
                attachment_id = doc_entity.get('dokumentId')
                if not attachment_id:
                    ctx.logger.error(f" ❌ No attachment ID found for document {doc_id}")
                    failed += 1
                    continue
                # Get attachment details for MIME type and original filename
                try:
                    attachment = await espocrm.get_entity('Attachment', attachment_id)
                    mime_type = attachment.get('type', 'application/octet-stream')
                    file_size = attachment.get('size', 0)
                    original_filename = attachment.get('name', doc_name)  # Original filename with extension
                    # URL-decode filename (fixes special chars like §, ä, ö, ü, etc.)
                    original_filename = unquote(original_filename)
                except Exception as e:
                    # Best effort: proceed with defaults rather than failing the doc.
                    ctx.logger.warn(f" ⚠️ Failed to get attachment details: {e}, using defaults")
                    mime_type = 'application/octet-stream'
                    file_size = 0
                    original_filename = unquote(doc_name)  # Also decode fallback name
                ctx.logger.info(f" 📎 Attachment: {attachment_id} ({mime_type}, {file_size} bytes)")
                ctx.logger.info(f" 📄 Original filename: {original_filename}")
                # Download document
                file_content = await espocrm.download_attachment(attachment_id)
                ctx.logger.info(f" 📥 Downloaded {len(file_content)} bytes")
                # Upload to XAI with original filename (includes extension)
                filename = original_filename
                xai_file_id = await xai.upload_file(file_content, filename, mime_type)
                ctx.logger.info(f" 📤 Uploaded to XAI: {xai_file_id}")
                # Add to collection
                await xai.add_to_collection(collection_id, xai_file_id)
                ctx.logger.info(f" ✅ Added to collection {collection_id}")
                # Update junction
                await espocrm.update_knowledge_document_junction(
                    knowledge_id,
                    doc_id,
                    {
                        'aiDocumentId': xai_file_id,
                        'syncstatus': 'synced'
                    },
                    update_last_sync=True
                )
                ctx.logger.info(f" ✅ Junction updated")
                # Track the new aiDocumentId for orphan detection
                synced_file_ids.add(xai_file_id)
                successful += 1
            except Exception as e:
                failed += 1
                ctx.logger.error(f" ❌ Sync failed: {e}")
                # Mark as failed in junction
                # (the last-sync timestamp is deliberately NOT updated on failure)
                try:
                    await espocrm.update_knowledge_document_junction(
                        knowledge_id,
                        doc_id,
                        {'syncstatus': 'failed'},
                        update_last_sync=False
                    )
                except Exception as update_err:
                    ctx.logger.error(f" ❌ Failed to update junction status: {update_err}")
        # ═══════════════════════════════════════════════════════════════
        # STEP 3: Remove orphaned documents from XAI collection
        # ═══════════════════════════════════════════════════════════════
        # NOTE(review): a document that failed in STEP 2 is not added to
        # synced_file_ids, so any previously uploaded XAI file of that document
        # is treated as an orphan and removed below — confirm this is intended.
        try:
            ctx.logger.info(f"\n🧹 Checking for orphaned documents in XAI collection...")
            # Get all files in XAI collection (normalized structure)
            xai_documents = await xai.list_collection_documents(collection_id)
            xai_file_ids = {doc.get('file_id') for doc in xai_documents if doc.get('file_id')}
            # Use synced_file_ids (collected during this sync) for orphan detection
            # This includes both pre-existing synced docs and newly uploaded ones
            ctx.logger.info(f" XAI has {len(xai_file_ids)} files, we have {len(synced_file_ids)} synced")
            # Find orphans (in XAI but not in our current sync)
            orphans = xai_file_ids - synced_file_ids
            if orphans:
                ctx.logger.info(f" Found {len(orphans)} orphaned file(s)")
                for orphan_id in orphans:
                    try:
                        await xai.remove_from_collection(collection_id, orphan_id)
                        ctx.logger.info(f" 🗑️ Removed {orphan_id}")
                    except Exception as e:
                        ctx.logger.warn(f" ⚠️ Failed to remove {orphan_id}: {e}")
            else:
                ctx.logger.info(f" ✅ No orphans found")
        except Exception as e:
            # Cleanup is best-effort; never fail the whole sync because of it.
            ctx.logger.warn(f"⚠️ Failed to clean up orphans: {e}")
        # ═══════════════════════════════════════════════════════════════
        # STEP 4: Summary
        # ═══════════════════════════════════════════════════════════════
        ctx.logger.info("")
        ctx.logger.info("=" * 80)
        ctx.logger.info(f"📊 Sync Statistics:")
        ctx.logger.info(f" ✅ Synced: {successful}")
        ctx.logger.info(f" ⏭️ Skipped: {skipped}")
        ctx.logger.info(f" ❌ Failed: {failed}")
        ctx.logger.info(f" Mode: Blake3 hash verification enabled")
        ctx.logger.info("=" * 80)
def _calculate_metadata_hash(self, document: Dict) -> str:
"""
Calculate hash of sync-relevant metadata.
Args:
document: CDokumente entity
Returns:
MD5 hash (32 chars)
"""
metadata = {
'name': document.get('name', ''),
'description': document.get('description', ''),
}
metadata_str = json.dumps(metadata, sort_keys=True)
return hashlib.md5(metadata_str.encode()).hexdigest()
def _build_xai_metadata(self, document: Dict) -> Dict[str, str]:
"""
Build XAI metadata from CDokumente entity.
Args:
document: CDokumente entity
Returns:
Metadata dict for XAI
"""
return {
'document_name': document.get('name', ''),
'description': document.get('description', ''),
'created_at': document.get('createdAt', ''),
'modified_at': document.get('modifiedAt', ''),
'espocrm_id': document.get('id', '')
}
async def _get_document_download_info(
self,
document: Dict,
ctx
) -> Optional[Dict[str, Any]]:
"""
Get download info for CDokumente entity.
Args:
document: CDokumente entity
ctx: Motia context
Returns:
Dict with attachment_id, filename, mime_type
"""
from services.espocrm import EspoCRMAPI
espocrm = EspoCRMAPI(ctx)
# Check for dokumentId (CDokumente custom field)
attachment_id = None
filename = None
if document.get('dokumentId'):
attachment_id = document.get('dokumentId')
filename = document.get('dokumentName')
elif document.get('fileId'):
attachment_id = document.get('fileId')
filename = document.get('fileName')
if not attachment_id:
ctx.logger.error(f"❌ No attachment ID for document {document['id']}")
return None
# Get attachment details
try:
attachment = await espocrm.get_entity('Attachment', attachment_id)
return {
'attachment_id': attachment_id,
'filename': filename or attachment.get('name', 'unknown'),
'mime_type': attachment.get('type', 'application/octet-stream')
}
except Exception as e:
ctx.logger.error(f"❌ Failed to get attachment {attachment_id}: {e}")
return None

View File

@@ -0,0 +1,110 @@
"""Aktenzeichen-Erkennung und Validation
Utility functions für das Erkennen, Validieren und Normalisieren von
Aktenzeichen im Format '1234/56' oder 'ABC/23'.
"""
import re
from typing import Optional
# Regex für Aktenzeichen: 1-4 Zeichen (alphanumerisch) + "/" + 2 Ziffern
AKTENZEICHEN_REGEX = re.compile(r'^([A-Za-z0-9]{1,4}/\d{2})\s*', re.IGNORECASE)
def extract_aktenzeichen(text: str) -> Optional[str]:
    r"""Extract a case reference (Aktenzeichen) from the start of *text*.

    Pattern: ^[A-Za-z0-9]{1,4}/\d{2}

    Examples:
        >>> extract_aktenzeichen("1234/56 Was ist der Stand?")
        '1234/56'
        >>> extract_aktenzeichen("ABC/23 Frage zum Vertrag")
        'ABC/23'
        >>> extract_aktenzeichen("Kein Aktenzeichen hier") is None
        True

    Args:
        text: input text (e.g. the first chat message)

    Returns:
        The case reference as a string, or None if not found.
    """
    if not isinstance(text, str) or not text:
        return None
    match = re.match(r'^([A-Za-z0-9]{1,4}/\d{2})\s*', text.strip(), re.IGNORECASE)
    return match.group(1) if match else None
def remove_aktenzeichen(text: str) -> str:
    r"""Remove a leading case reference (Aktenzeichen) from *text*.

    The input is stripped before the anchored substitution so that this
    function removes exactly the references that extract_aktenzeichen
    (which also strips before matching) would find. Previously an input
    like " 1234/56 foo" was extracted but never removed because the
    anchored pattern did not match past the leading whitespace.

    Examples:
        >>> remove_aktenzeichen("1234/56 Was ist der Stand?")
        'Was ist der Stand?'
        >>> remove_aktenzeichen("Kein Aktenzeichen")
        'Kein Aktenzeichen'

    Args:
        text: input text, possibly starting with a case reference

    Returns:
        Text without the leading case reference, whitespace-trimmed.
        Non-string or falsy inputs are returned unchanged.
    """
    if not text or not isinstance(text, str):
        return text
    # Strip first so the ^ anchor sees the same start as extract_aktenzeichen.
    return re.sub(
        r'^([A-Za-z0-9]{1,4}/\d{2})\s*',
        '',
        text.strip(),
        count=1,
        flags=re.IGNORECASE
    ).strip()
def validate_aktenzeichen(az: str) -> bool:
    r"""Check whether *az* is a well-formed case reference.

    Pattern: ^[A-Za-z0-9]{1,4}/\d{2}$

    Examples:
        >>> validate_aktenzeichen("1234/56")
        True
        >>> validate_aktenzeichen("12345/567")  # too long
        False
        >>> validate_aktenzeichen("1234-56")  # wrong separator
        False

    Args:
        az: candidate case reference

    Returns:
        True if the whole string matches the format, False otherwise.
    """
    if not isinstance(az, str) or not az:
        return False
    return re.match(r'^[A-Za-z0-9]{1,4}/\d{2}$', az, re.IGNORECASE) is not None
def normalize_aktenzeichen(az: str) -> str:
    """Normalise a case reference: trim surrounding whitespace, uppercase.

    Examples:
        >>> normalize_aktenzeichen("abc/23")
        'ABC/23'
        >>> normalize_aktenzeichen(" 1234/56 ")
        '1234/56'

    Args:
        az: case reference to normalise

    Returns:
        Normalised reference; non-string or falsy inputs are returned unchanged.
    """
    if isinstance(az, str) and az:
        return az.strip().upper()
    return az

View File

@@ -6,9 +6,6 @@ Transformiert Bankverbindungen zwischen den beiden Systemen
from typing import Dict, Any, Optional, List from typing import Dict, Any, Optional, List
from datetime import datetime from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class BankverbindungenMapper: class BankverbindungenMapper:

View File

@@ -13,63 +13,43 @@ Hilfsfunktionen für Sync-Operationen:
from typing import Dict, Any, Optional, Tuple, Literal from typing import Dict, Any, Optional, Tuple, Literal
from datetime import datetime, timedelta from datetime import datetime, timedelta
import pytz import pytz
import logging
import redis
import os
logger = logging.getLogger(__name__) from services.exceptions import LockAcquisitionError, SyncError, ValidationError
from services.redis_client import get_redis_client
from services.config import SYNC_CONFIG, get_lock_key, get_retry_delay_seconds
from services.logging_utils import get_service_logger
import redis
# Timestamp-Vergleich Ergebnis-Typen # Timestamp-Vergleich Ergebnis-Typen
TimestampResult = Literal["espocrm_newer", "advoware_newer", "conflict", "no_change"] TimestampResult = Literal["espocrm_newer", "advoware_newer", "conflict", "no_change"]
# Max retry before permanent failure
MAX_SYNC_RETRIES = 5
# Lock TTL in seconds (prevents deadlocks)
LOCK_TTL_SECONDS = 900 # 15 minutes
# Retry backoff: Wartezeit zwischen Retries (in Minuten)
RETRY_BACKOFF_MINUTES = [1, 5, 15, 60, 240] # 1min, 5min, 15min, 1h, 4h
# Auto-Reset nach 24h (für permanently_failed entities)
AUTO_RESET_HOURS = 24
class BeteiligteSync: class BeteiligteSync:
"""Utility-Klasse für Beteiligte-Synchronisation""" """Utility-Klasse für Beteiligte-Synchronisation"""
def __init__(self, espocrm_api, redis_client: redis.Redis = None, context=None): def __init__(self, espocrm_api, redis_client: Optional[redis.Redis] = None, context=None):
self.espocrm = espocrm_api self.espocrm = espocrm_api
self.context = context self.context = context
self.logger = context.logger if context else logger self.logger = get_service_logger('beteiligte_sync', context)
self.redis = redis_client or self._init_redis()
# Use provided Redis client or get from factory
self.redis = redis_client or get_redis_client(strict=False)
if not self.redis:
self.logger.error(
"⚠️ KRITISCH: Redis nicht verfügbar! "
"Distributed Locking deaktiviert - Race Conditions möglich!"
)
# Import NotificationManager only when needed # Import NotificationManager only when needed
from services.notification_utils import NotificationManager from services.notification_utils import NotificationManager
self.notification_manager = NotificationManager(espocrm_api=self.espocrm, context=context) self.notification_manager = NotificationManager(espocrm_api=self.espocrm, context=context)
def _init_redis(self) -> redis.Redis: def _log(self, message: str, level: str = 'info') -> None:
"""Initialize Redis client for distributed locking""" """Delegate logging to the logger with optional level"""
try: log_func = getattr(self.logger, level, self.logger.info)
redis_host = os.getenv('REDIS_HOST', 'localhost') log_func(message)
redis_port = int(os.getenv('REDIS_PORT', '6379'))
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
client = redis.Redis(
host=redis_host,
port=redis_port,
db=redis_db,
decode_responses=True
)
client.ping()
return client
except Exception as e:
self._log(f"Redis connection failed: {e}", level='error')
return None
def _log(self, message: str, level: str = 'info'):
"""Logging mit Context-Support"""
if self.context and hasattr(self.context, 'logger'):
getattr(self.context.logger, level)(message)
else:
getattr(logger, level)(message)
async def acquire_sync_lock(self, entity_id: str) -> bool: async def acquire_sync_lock(self, entity_id: str) -> bool:
""" """
@@ -80,27 +60,39 @@ class BeteiligteSync:
Returns: Returns:
True wenn Lock erfolgreich, False wenn bereits im Sync True wenn Lock erfolgreich, False wenn bereits im Sync
Raises:
SyncError: Bei kritischen Sync-Problemen
""" """
try: try:
# STEP 1: Atomic Redis lock (prevents race conditions) # STEP 1: Atomic Redis lock (prevents race conditions)
if self.redis: if self.redis:
lock_key = f"sync_lock:cbeteiligte:{entity_id}" lock_key = get_lock_key('cbeteiligte', entity_id)
acquired = self.redis.set(lock_key, "locked", nx=True, ex=LOCK_TTL_SECONDS) acquired = self.redis.set(
lock_key,
"locked",
nx=True,
ex=SYNC_CONFIG.lock_ttl_seconds
)
if not acquired: if not acquired:
self._log(f"Redis lock bereits aktiv für {entity_id}", level='warn') self.logger.warning(f"Redis lock bereits aktiv für {entity_id}")
return False return False
else:
self.logger.error(
f"⚠️ WARNUNG: Sync ohne Redis-Lock für {entity_id} - Race Condition möglich!"
)
# STEP 2: Update syncStatus (für UI visibility) # STEP 2: Update syncStatus (für UI visibility)
await self.espocrm.update_entity('CBeteiligte', entity_id, { await self.espocrm.update_entity('CBeteiligte', entity_id, {
'syncStatus': 'syncing' 'syncStatus': 'syncing'
}) })
self._log(f"Sync-Lock für {entity_id} erworben") self.logger.info(f"Sync-Lock für {entity_id} erworben")
return True return True
except Exception as e: except Exception as e:
self._log(f"Fehler beim Acquire Lock: {e}", level='error') self.logger.error(f"Fehler beim Acquire Lock: {e}")
# Clean up Redis lock on error # Clean up Redis lock on error
if self.redis: if self.redis:
try: try:
@@ -152,32 +144,42 @@ class BeteiligteSync:
update_data['syncRetryCount'] = new_retry update_data['syncRetryCount'] = new_retry
# Exponential backoff - berechne nächsten Retry-Zeitpunkt # Exponential backoff - berechne nächsten Retry-Zeitpunkt
if new_retry <= len(RETRY_BACKOFF_MINUTES): backoff_minutes = SYNC_CONFIG.retry_backoff_minutes
backoff_minutes = RETRY_BACKOFF_MINUTES[new_retry - 1] if new_retry <= len(backoff_minutes):
backoff_min = backoff_minutes[new_retry - 1]
else: else:
backoff_minutes = RETRY_BACKOFF_MINUTES[-1] # Letzte Backoff-Zeit backoff_min = backoff_minutes[-1] # Letzte Backoff-Zeit
next_retry = now_utc + timedelta(minutes=backoff_minutes) next_retry = now_utc + timedelta(minutes=backoff_min)
update_data['syncNextRetry'] = next_retry.strftime('%Y-%m-%d %H:%M:%S') update_data['syncNextRetry'] = next_retry.strftime('%Y-%m-%d %H:%M:%S')
self._log(f"Retry {new_retry}/{MAX_SYNC_RETRIES}, nächster Versuch in {backoff_minutes} Minuten") self.logger.info(
f"Retry {new_retry}/{SYNC_CONFIG.max_retries}, "
f"nächster Versuch in {backoff_min} Minuten"
)
# Check max retries - mark as permanently failed # Check max retries - mark as permanently failed
if new_retry >= MAX_SYNC_RETRIES: if new_retry >= SYNC_CONFIG.max_retries:
update_data['syncStatus'] = 'permanently_failed' update_data['syncStatus'] = 'permanently_failed'
# Auto-Reset Timestamp für Wiederherstellung nach 24h # Auto-Reset Timestamp für Wiederherstellung nach 24h
auto_reset_time = now_utc + timedelta(hours=AUTO_RESET_HOURS) auto_reset_time = now_utc + timedelta(hours=SYNC_CONFIG.auto_reset_hours)
update_data['syncAutoResetAt'] = auto_reset_time.strftime('%Y-%m-%d %H:%M:%S') update_data['syncAutoResetAt'] = auto_reset_time.strftime('%Y-%m-%d %H:%M:%S')
await self.send_notification( await self.send_notification(
entity_id, entity_id,
'error', 'error',
extra_data={ extra_data={
'message': f"Sync fehlgeschlagen nach {MAX_SYNC_RETRIES} Versuchen. Auto-Reset in {AUTO_RESET_HOURS}h." 'message': (
f"Sync fehlgeschlagen nach {SYNC_CONFIG.max_retries} Versuchen. "
f"Auto-Reset in {SYNC_CONFIG.auto_reset_hours}h."
)
} }
) )
self._log(f"Max retries ({MAX_SYNC_RETRIES}) erreicht für {entity_id}, Auto-Reset um {auto_reset_time}", level='error') self.logger.error(
f"Max retries ({SYNC_CONFIG.max_retries}) erreicht für {entity_id}, "
f"Auto-Reset um {auto_reset_time}"
)
else: else:
update_data['syncRetryCount'] = 0 update_data['syncRetryCount'] = 0
update_data['syncNextRetry'] = None update_data['syncNextRetry'] = None
@@ -188,33 +190,32 @@ class BeteiligteSync:
await self.espocrm.update_entity('CBeteiligte', entity_id, update_data) await self.espocrm.update_entity('CBeteiligte', entity_id, update_data)
self._log(f"Sync-Lock released: {entity_id}{new_status}") self.logger.info(f"Sync-Lock released: {entity_id}{new_status}")
# Release Redis lock # Release Redis lock
if self.redis: if self.redis:
lock_key = f"sync_lock:cbeteiligte:{entity_id}" lock_key = get_lock_key('cbeteiligte', entity_id)
self.redis.delete(lock_key) self.redis.delete(lock_key)
except Exception as e: except Exception as e:
self._log(f"Fehler beim Release Lock: {e}", level='error') self.logger.error(f"Fehler beim Release Lock: {e}")
# Ensure Redis lock is released even on error # Ensure Redis lock is released even on error
if self.redis: if self.redis:
try: try:
lock_key = f"sync_lock:cbeteiligte:{entity_id}" lock_key = get_lock_key('cbeteiligte', entity_id)
self.redis.delete(lock_key) self.redis.delete(lock_key)
except: except:
pass pass
@staticmethod def parse_timestamp(self, ts: Any) -> Optional[datetime]:
def parse_timestamp(ts: Any) -> Optional[datetime]:
""" """
Parse verschiedene Timestamp-Formate zu datetime Parse various timestamp formats to datetime.
Args: Args:
ts: String, datetime oder None ts: String, datetime or None
Returns: Returns:
datetime-Objekt oder None datetime object or None
""" """
if not ts: if not ts:
return None return None
@@ -223,13 +224,13 @@ class BeteiligteSync:
return ts return ts
if isinstance(ts, str): if isinstance(ts, str):
# EspoCRM Format: "2026-02-07 14:30:00" # EspoCRM format: "2026-02-07 14:30:00"
# Advoware Format: "2026-02-07T14:30:00" oder "2026-02-07T14:30:00Z" # Advoware format: "2026-02-07T14:30:00" or "2026-02-07T14:30:00Z"
try: try:
# Entferne trailing Z falls vorhanden # Remove trailing Z if present
ts = ts.rstrip('Z') ts = ts.rstrip('Z')
# Versuche verschiedene Formate # Try various formats
for fmt in [ for fmt in [
'%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S',
'%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S',
@@ -240,11 +241,11 @@ class BeteiligteSync:
except ValueError: except ValueError:
continue continue
# Fallback: ISO-Format # Fallback: ISO format
return datetime.fromisoformat(ts) return datetime.fromisoformat(ts)
except Exception as e: except Exception as e:
logger.warning(f"Konnte Timestamp nicht parsen: {ts} - {e}") self._log(f"Could not parse timestamp: {ts} - {e}", level='warning')
return None return None
return None return None

338
services/config.py Normal file
View File

@@ -0,0 +1,338 @@
"""
Zentrale Konfiguration für BitByLaw Integration
Alle Magic Numbers und Strings sind hier zentralisiert.
"""
import os
from dataclasses import dataclass
from typing import Dict, List, Optional
# ========== Sync Configuration ==========
@dataclass
class SyncConfig:
    """Configuration for sync operations (retries, locking, validation)."""

    # --- Retry configuration ---
    # Maximum number of retry attempts before an entity is marked permanently failed.
    max_retries: int = 5
    # Exponential backoff between retries, in minutes. Annotated Optional because
    # the default is None (the old `List[int] = None` annotation was incorrect);
    # __post_init__ replaces None with a fresh default list per instance.
    retry_backoff_minutes: Optional[List[int]] = None
    # Auto-reset window for permanently_failed entities (hours).
    auto_reset_hours: int = 24

    # --- Lock configuration ---
    # TTL for distributed locks in seconds (prevents deadlocks): 15 minutes.
    lock_ttl_seconds: int = 900
    # Prefix for Redis lock keys.
    lock_prefix: str = "sync_lock"

    # --- Validation ---
    # Validate entities before syncing (recommended).
    validate_before_sync: bool = True

    # --- Change detection ---
    # Use rowId for change detection (Advoware).
    use_rowid_change_detection: bool = True

    def __post_init__(self):
        if self.retry_backoff_minutes is None:
            # Default exponential backoff: 1, 5, 15, 60, 240 minutes.
            # Built per instance so instances never share one mutable list.
            self.retry_backoff_minutes = [1, 5, 15, 60, 240]


# Singleton Instance
SYNC_CONFIG = SyncConfig()
# ========== API Configuration ==========
@dataclass
class APIConfig:
    """API-specific configuration (timeouts, retries, rate limiting)."""

    # --- Timeouts ---
    # Default timeout for API calls (seconds).
    default_timeout_seconds: int = 30
    # Timeout for long-running operations, e.g. uploads (seconds).
    long_running_timeout_seconds: int = 120

    # --- Retry ---
    # Number of retries on API errors.
    max_api_retries: int = 3
    # HTTP status codes that trigger a retry. Annotated Optional because the
    # default is None (the old `List[int] = None` annotation was incorrect);
    # __post_init__ substitutes a fresh default list per instance.
    retry_status_codes: Optional[List[int]] = None

    # --- Rate limiting ---
    # Enable rate limiting.
    rate_limit_enabled: bool = True
    # Maximum API calls per minute.
    rate_limit_calls_per_minute: int = 60

    def __post_init__(self):
        if self.retry_status_codes is None:
            # Retry on: 408 (Timeout), 429 (Rate Limit), 500, 502, 503, 504
            self.retry_status_codes = [408, 429, 500, 502, 503, 504]


API_CONFIG = APIConfig()
# ========== Advoware Configuration ==========
@dataclass
class AdvowareConfig:
    """Advoware-specific configuration (token handling, auth, field mapping)."""

    # --- Token management ---
    # Token lifetime in minutes (actual lifetime is 60 min; 5 min safety buffer).
    token_lifetime_minutes: int = 55
    # Redis key for the cached access token.
    token_cache_key: str = "advoware_access_token"
    # Redis key for the token timestamp.
    token_timestamp_key: str = "advoware_token_timestamp"

    # --- Auth ---
    # Advoware auth endpoint.
    auth_url: str = "https://security.advo-net.net/api/v1/Token"
    # Advoware product ID.
    product_id: int = 64

    # --- Field mapping ---
    # Fields that cannot be changed via PUT. Annotated Optional because the
    # default is None (the old `List[str] = None` annotation was incorrect);
    # __post_init__ substitutes a fresh default list per instance.
    readonly_fields: Optional[List[str]] = None

    def __post_init__(self):
        if self.readonly_fields is None:
            # These fields cannot be changed via PUT
            self.readonly_fields = [
                'betNr', 'rowId', 'kommKz',  # Kommunikation: kommKz is read-only!
                'handelsRegisterNummer', 'registergericht'  # Ignored by the API
            ]


ADVOWARE_CONFIG = AdvowareConfig()
# ========== EspoCRM Configuration ==========
@dataclass
class EspoCRMConfig:
    """EspoCRM-specific configuration (paging, sync-status fields, notifications)."""

    # --- API paging ---
    default_page_size: int = 50   # default page size for list queries
    max_page_size: int = 200      # hard upper bound for page size

    # --- Sync status fields on entities ---
    sync_status_field: str = "syncStatus"       # field name for sync status
    sync_error_field: str = "syncErrorMessage"  # field name for sync errors
    sync_retry_field: str = "syncRetryCount"    # field name for retry counter

    # --- Notifications ---
    notification_enabled: bool = True  # enable in-app notifications
    notification_user_id: str = "1"    # user ID for notifications (Marvin)


ESPOCRM_CONFIG = EspoCRMConfig()
# ========== Redis Configuration ==========
@dataclass
class RedisConfig:
    """Redis-specific configuration (connection, behaviour, key namespace)."""

    # --- Connection ---
    host: str = "localhost"
    port: int = 6379
    db: int = 1
    timeout_seconds: int = 5
    max_connections: int = 50

    # --- Behaviour ---
    decode_responses: bool = True    # auto-decode bytes to strings
    health_check_interval: int = 30  # health-check interval in seconds

    # --- Keys ---
    key_prefix: str = "bitbylaw"     # prefix for all Redis keys

    def get_key(self, key: str) -> str:
        """Return the fully qualified Redis key (prefix included)."""
        return f"{self.key_prefix}:{key}"

    @classmethod
    def from_env(cls) -> 'RedisConfig':
        """Build a RedisConfig from environment variables, falling back to defaults."""
        env = os.getenv
        return cls(
            host=env('REDIS_HOST', 'localhost'),
            port=int(env('REDIS_PORT', '6379')),
            db=int(env('REDIS_DB_ADVOWARE_CACHE', '1')),
            timeout_seconds=int(env('REDIS_TIMEOUT_SECONDS', '5')),
            max_connections=int(env('REDIS_MAX_CONNECTIONS', '50'))
        )


REDIS_CONFIG = RedisConfig.from_env()
# ========== Logging Configuration ==========
@dataclass
class LoggingConfig:
    """Logging configuration (levels, format, performance logging)."""

    # --- Levels ---
    default_level: str = "INFO"  # default log level
    api_level: str = "INFO"      # log level for API calls
    sync_level: str = "INFO"     # log level for sync operations

    # --- Format ---
    log_format: str = "[{timestamp}] {level} {logger}: {message}"  # log line layout
    include_context: bool = True  # include Motia FlowContext in log output

    # --- Performance ---
    log_api_timings: bool = True    # log API call timings
    log_sync_duration: bool = True  # log sync duration


LOGGING_CONFIG = LoggingConfig()
# ========== Calendar Sync Configuration ==========
@dataclass
class CalendarSyncConfig:
    """Configuration for the Google Calendar sync job."""

    # --- Sync window ---
    sync_days_past: int = 7     # days into the past to sync
    sync_days_future: int = 90  # days into the future to sync

    # --- Scheduling ---
    # Six-field cron expression (with seconds): runs every 15 minutes.
    cron_schedule: str = "0 */15 * * * *"

    # --- Batching ---
    batch_size: int = 10  # number of employees per batch


CALENDAR_SYNC_CONFIG = CalendarSyncConfig()
# ========== Feature Flags ==========
@dataclass
class FeatureFlags:
    """Feature flags for incremental rollout of sync features."""

    # --- Validation ---
    strict_validation: bool = True  # strict validation via Pydantic

    # --- Sync features (still in development) ---
    kommunikation_sync_enabled: bool = False
    document_sync_enabled: bool = False

    # --- Advanced features (experimental) ---
    parallel_sync_enabled: bool = False
    auto_conflict_resolution: bool = False

    # --- Debug ---
    debug_mode: bool = False  # more logging, slower


FEATURE_FLAGS = FeatureFlags()
# ========== Helper Functions ==========
def get_retry_delay_seconds(attempt: int) -> int:
    """Return the retry delay in seconds for the given attempt.

    Args:
        attempt: attempt number (0-indexed)

    Returns:
        Delay in seconds; attempts beyond the backoff table reuse the
        table's last (largest) entry.
    """
    table = SYNC_CONFIG.retry_backoff_minutes
    index = attempt if attempt < len(table) else -1
    return table[index] * 60
def get_lock_key(entity_type: str, entity_id: str) -> str:
    """Build the Redis lock key for an entity.

    Args:
        entity_type: entity type, e.g. 'cbeteiligte' (lower-cased in the key)
        entity_id: entity ID

    Returns:
        Redis key of the form '<lock_prefix>:<entity_type>:<entity_id>'
    """
    parts = (SYNC_CONFIG.lock_prefix, entity_type.lower(), entity_id)
    return ":".join(parts)
def is_retryable_status_code(status_code: int) -> bool:
    """Return True if the given HTTP status code should trigger a retry.

    Args:
        status_code: HTTP status code from an API response

    Returns:
        True when the code is in the configured retry list.
    """
    retryable_codes = API_CONFIG.retry_status_codes
    return status_code in retryable_codes

View File

@@ -1,20 +1,19 @@
""" """
Document Sync Utilities Document Sync Utilities
Hilfsfunktionen für Document-Synchronisation mit xAI: Utility functions for document synchronization with xAI:
- Distributed locking via Redis + syncStatus - Distributed locking via Redis + syncStatus
- Entscheidungslogik: Wann muss ein Document zu xAI? - Decision logic: When does a document need xAI sync?
- Related Entities ermitteln (Many-to-Many Attachments) - Related entities determination (Many-to-Many attachments)
- xAI Collection Management - xAI Collection management
""" """
from typing import Dict, Any, Optional, List, Tuple from typing import Dict, Any, Optional, List, Tuple
from datetime import datetime, timedelta from datetime import datetime, timedelta
import logging from urllib.parse import unquote
from services.sync_utils_base import BaseSyncUtils from services.sync_utils_base import BaseSyncUtils
from services.models import FileStatus, XAISyncStatus
logger = logging.getLogger(__name__)
# Max retry before permanent failure # Max retry before permanent failure
MAX_SYNC_RETRIES = 5 MAX_SYNC_RETRIES = 5
@@ -22,12 +21,18 @@ MAX_SYNC_RETRIES = 5
# Retry backoff: Wartezeit zwischen Retries (in Minuten) # Retry backoff: Wartezeit zwischen Retries (in Minuten)
RETRY_BACKOFF_MINUTES = [1, 5, 15, 60, 240] # 1min, 5min, 15min, 1h, 4h RETRY_BACKOFF_MINUTES = [1, 5, 15, 60, 240] # 1min, 5min, 15min, 1h, 4h
# Legacy file status values (for backward compatibility)
# These are old German and English status values that may still exist in the database
LEGACY_NEW_STATUS_VALUES = {'neu', 'Neu', 'New'}
LEGACY_CHANGED_STATUS_VALUES = {'geändert', 'Geändert', 'Changed'}
LEGACY_SYNCED_STATUS_VALUES = {'synced', 'Synced', 'synchronized', 'Synchronized'}
class DocumentSync(BaseSyncUtils): class DocumentSync(BaseSyncUtils):
"""Utility-Klasse für Document-Synchronisation mit xAI""" """Utility class for document synchronization with xAI"""
def _get_lock_key(self, entity_id: str) -> str: def _get_lock_key(self, entity_id: str) -> str:
"""Redis Lock-Key für Documents""" """Redis lock key for documents"""
return f"sync_lock:document:{entity_id}" return f"sync_lock:document:{entity_id}"
async def acquire_sync_lock(self, entity_id: str, entity_type: str = 'CDokumente') -> bool: async def acquire_sync_lock(self, entity_id: str, entity_type: str = 'CDokumente') -> bool:
@@ -48,15 +53,13 @@ class DocumentSync(BaseSyncUtils):
self._log(f"Redis lock bereits aktiv für {entity_type} {entity_id}", level='warn') self._log(f"Redis lock bereits aktiv für {entity_type} {entity_id}", level='warn')
return False return False
# STEP 2: Update syncStatus (für UI visibility) - nur bei Document Entity # STEP 2: Update xaiSyncStatus to pending_sync
# CDokumente hat dieses Feld nicht - überspringen try:
if entity_type == 'Document': await self.espocrm.update_entity(entity_type, entity_id, {
try: 'xaiSyncStatus': XAISyncStatus.PENDING_SYNC.value
await self.espocrm.update_entity(entity_type, entity_id, { })
'xaiSyncStatus': 'syncing' except Exception as e:
}) self._log(f"Could not set xaiSyncStatus: {e}", level='debug')
except Exception as e:
self._log(f"Konnte xaiSyncStatus nicht setzen: {e}", level='debug')
self._log(f"Sync-Lock für {entity_type} {entity_id} erworben") self._log(f"Sync-Lock für {entity_type} {entity_id} erworben")
return True return True
@@ -89,17 +92,16 @@ class DocumentSync(BaseSyncUtils):
try: try:
update_data = {} update_data = {}
# Status-Felder nur bei Document Entity (CDokumente hat diese Felder nicht) # Set xaiSyncStatus: clean on success, failed on error
if entity_type == 'Document': try:
try: update_data['xaiSyncStatus'] = XAISyncStatus.CLEAN.value if success else XAISyncStatus.FAILED.value
update_data['xaiSyncStatus'] = 'synced' if success else 'failed'
if error_message: if error_message:
update_data['xaiSyncError'] = error_message[:2000] update_data['xaiSyncError'] = error_message[:2000]
else: else:
update_data['xaiSyncError'] = None update_data['xaiSyncError'] = None
except: except:
pass # Felder existieren evtl. nicht pass # Fields may not exist
# Merge extra fields (z.B. xaiFileId, xaiCollections) # Merge extra fields (z.B. xaiFileId, xaiCollections)
if extra_fields: if extra_fields:
@@ -120,164 +122,187 @@ class DocumentSync(BaseSyncUtils):
lock_key = self._get_lock_key(entity_id) lock_key = self._get_lock_key(entity_id)
self._release_redis_lock(lock_key) self._release_redis_lock(lock_key)
async def should_sync_to_xai(self, document: Dict[str, Any]) -> Tuple[bool, List[str], str]: async def should_sync_to_xai(
self,
document: Dict[str, Any],
entity_type: str = 'CDokumente'
) -> Tuple[bool, List[str], str]:
""" """
Entscheidet ob ein Document zu xAI synchronisiert werden muss Decide if a document needs to be synchronized to xAI.
Prüft: Checks:
1. Datei-Status Feld ("Neu", "Geändert") 1. File status field ("new", "changed")
2. Hash-Werte für Change Detection 2. Hash values for change detection
3. Related Entities mit xAI Collections 3. Related entities with xAI collections
Args: Args:
document: Vollständiges Document Entity von EspoCRM document: Complete document entity from EspoCRM
Returns: Returns:
Tuple[bool, List[str], str]: Tuple[bool, List[str], str]:
- bool: Ob Sync nötig ist - bool: Whether sync is needed
- List[str]: Liste der Collection-IDs in die das Document soll - List[str]: List of collection IDs where the document should go
- str: Grund/Beschreibung der Entscheidung - str: Reason/description of the decision
""" """
doc_id = document.get('id') doc_id = document.get('id')
doc_name = document.get('name', 'Unbenannt') doc_name = document.get('name', 'Unbenannt')
# xAI-relevante Felder # xAI-relevant fields
xai_file_id = document.get('xaiFileId') xai_file_id = document.get('xaiFileId')
xai_collections = document.get('xaiCollections') or [] xai_collections = document.get('xaiCollections') or []
xai_sync_status = document.get('xaiSyncStatus')
# Datei-Status und Hash-Felder # File status and hash fields
datei_status = document.get('dateiStatus') or document.get('fileStatus') datei_status = document.get('dateiStatus') or document.get('fileStatus')
file_md5 = document.get('md5') or document.get('fileMd5') file_md5 = document.get('md5') or document.get('fileMd5')
file_sha = document.get('sha') or document.get('fileSha') file_sha = document.get('sha') or document.get('fileSha')
xai_synced_hash = document.get('xaiSyncedHash') # Hash beim letzten xAI-Sync xai_synced_hash = document.get('xaiSyncedHash') # Hash at last xAI sync
self._log(f"📋 Document Analysis: {doc_name} (ID: {doc_id})") self._log(f"📋 Document analysis: {doc_name} (ID: {doc_id})")
self._log(f" xaiFileId: {xai_file_id or 'N/A'}") self._log(f" xaiFileId: {xai_file_id or 'N/A'}")
self._log(f" xaiCollections: {xai_collections}") self._log(f" xaiCollections: {xai_collections}")
self._log(f" Datei-Status: {datei_status or 'N/A'}") self._log(f" xaiSyncStatus: {xai_sync_status or 'N/A'}")
self._log(f" fileStatus: {datei_status or 'N/A'}")
self._log(f" MD5: {file_md5[:16] if file_md5 else 'N/A'}...") self._log(f" MD5: {file_md5[:16] if file_md5 else 'N/A'}...")
self._log(f" SHA: {file_sha[:16] if file_sha else 'N/A'}...") self._log(f" SHA: {file_sha[:16] if file_sha else 'N/A'}...")
self._log(f" xaiSyncedHash: {xai_synced_hash[:16] if xai_synced_hash else 'N/A'}...") self._log(f" xaiSyncedHash: {xai_synced_hash[:16] if xai_synced_hash else 'N/A'}...")
# ═══════════════════════════════════════════════════════════════ # Determine target collections from relations (CDokumente -> linked entities)
# PRIORITY CHECK: Datei-Status "Neu" oder "Geändert" target_collections = await self._get_required_collections_from_relations(
# ═══════════════════════════════════════════════════════════════ doc_id,
if datei_status in ['Neu', 'Geändert', 'neu', 'geändert', 'New', 'Changed']: entity_type=entity_type
self._log(f"🆕 Datei-Status: '{datei_status}' → xAI-Sync ERFORDERLICH") )
# Hole Collections (entweder existierende oder von Related Entities) # Check xaiSyncStatus="no_sync" -> no sync for this document
if xai_collections: if xai_sync_status == XAISyncStatus.NO_SYNC.value:
target_collections = xai_collections self._log("⏭️ No xAI sync needed: xaiSyncStatus='no_sync'")
else: return (False, [], "xaiSyncStatus is 'no_sync'")
target_collections = await self._get_required_collections_from_relations(doc_id)
if not target_collections:
self._log("⏭️ No xAI sync needed: No related entities with xAI collections")
return (False, [], "No linked entities with xAI collections")
# ═══════════════════════════════════════════════════════════════
# PRIORITY CHECK 1: xaiSyncStatus="unclean" -> document was changed
# ═══════════════════════════════════════════════════════════════
if xai_sync_status == XAISyncStatus.UNCLEAN.value:
self._log(f"🆕 xaiSyncStatus='unclean' → xAI sync REQUIRED")
return (True, target_collections, "xaiSyncStatus='unclean'")
# ═══════════════════════════════════════════════════════════════
# PRIORITY CHECK 2: fileStatus "new" or "changed"
# ═══════════════════════════════════════════════════════════════
# Check for standard enum values and legacy values
is_new = (datei_status == FileStatus.NEW.value or datei_status in LEGACY_NEW_STATUS_VALUES)
is_changed = (datei_status == FileStatus.CHANGED.value or datei_status in LEGACY_CHANGED_STATUS_VALUES)
if is_new or is_changed:
self._log(f"🆕 fileStatus: '{datei_status}' → xAI sync REQUIRED")
if target_collections: if target_collections:
return (True, target_collections, f"Datei-Status: {datei_status}") return (True, target_collections, f"fileStatus: {datei_status}")
else: else:
# Datei ist neu/geändert aber keine Collections gefunden # File is new/changed but no collections found
self._log(f"⚠️ Datei-Status '{datei_status}' aber keine Collections gefunden - überspringe Sync") self._log(f"⚠️ fileStatus '{datei_status}' but no collections found - skipping sync")
return (False, [], f"Datei-Status: {datei_status}, aber keine Collections") return (False, [], f"fileStatus: {datei_status}, but no collections")
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
# FALL 1: Document ist bereits in xAI UND Collections sind gesetzt # CASE 1: Document is already in xAI AND collections are set
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
if xai_file_id and xai_collections: if xai_file_id:
self._log(f"✅ Document bereits in xAI gesynct mit {len(xai_collections)} Collection(s)") self._log(f"✅ Document already synced to xAI with {len(target_collections)} collection(s)")
# Prüfe ob File-Inhalt geändert wurde (Hash-Vergleich) # Check if file content was changed (hash comparison)
current_hash = file_md5 or file_sha current_hash = file_md5 or file_sha
if current_hash and xai_synced_hash: if current_hash and xai_synced_hash:
if current_hash != xai_synced_hash: if current_hash != xai_synced_hash:
self._log(f"🔄 Hash-Änderung erkannt! RESYNC erforderlich") self._log(f"🔄 Hash change detected! RESYNC required")
self._log(f" Alt: {xai_synced_hash[:16]}...") self._log(f" Old: {xai_synced_hash[:16]}...")
self._log(f" Neu: {current_hash[:16]}...") self._log(f" New: {current_hash[:16]}...")
return (True, xai_collections, "File-Inhalt geändert (Hash-Mismatch)") return (True, target_collections, "File content changed (hash mismatch)")
else: else:
self._log(f"✅ Hash identisch - keine Änderung") self._log(f"✅ Hash identical - no change")
else: else:
self._log(f"⚠️ Keine Hash-Werte verfügbar für Vergleich") self._log(f"⚠️ No hash values available for comparison")
return (False, xai_collections, "Bereits gesynct, keine Änderung erkannt") return (False, target_collections, "Already synced, no change detected")
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
# FALL 2: Document hat xaiFileId aber Collections ist leer/None # CASE 2: Document has xaiFileId but collections is empty/None
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
if xai_file_id and not xai_collections:
self._log(f"⚠️ Document hat xaiFileId aber keine Collections - prüfe Related Entities")
# Fallthrough zu FALL 3 - prüfe Related Entities
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
# FALL 3: Prüfe Related Entities (Attachments) # CASE 3: Collections present but no status/hash trigger
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
required_collections = await self._get_required_collections_from_relations(doc_id) self._log(f"✅ Document is linked to {len(target_collections)} entity/ies with collections")
return (True, target_collections, "Linked to entities that require collections")
if required_collections: async def _get_required_collections_from_relations(
self._log(f"✅ Document ist mit {len(required_collections)} Entity/ies verknüpft die Collections haben") self,
return (True, required_collections, f"Verknüpft mit Entities die Collections benötigen") document_id: str,
entity_type: str = 'Document'
# ═══════════════════════════════════════════════════════════════ ) -> List[str]:
# FALL 4: Keine Collections gefunden → kein Sync nötig
# ═══════════════════════════════════════════════════════════════
self._log(f"⏭️ Kein xAI-Sync nötig: Keine Related Entities mit Collections")
return (False, [], "Keine verknüpften Entities mit xAI Collections")
async def _get_required_collections_from_relations(self, document_id: str) -> List[str]:
""" """
Ermittelt alle xAI Collection-IDs von Entities die mit diesem Document verknüpft sind Determine all xAI collection IDs of CAIKnowledge entities linked to this document.
EspoCRM Many-to-Many: Document kann mit beliebigen Entities verknüpft sein Checks CAIKnowledgeCDokumente junction table:
(CBeteiligte, Account, CVmhErstgespraech, etc.) - Status 'active' + datenbankId: Returns collection ID
- Status 'new': Returns "NEW:{knowledge_id}" marker (collection must be created first)
- Other statuses (paused, deactivated): Skips
Args: Args:
document_id: Document ID document_id: Document ID
entity_type: Entity type (e.g., 'CDokumente')
Returns: Returns:
Liste von xAI Collection-IDs (dedupliziert) List of collection IDs or markers:
- Normal IDs: "abc123..." (existing collections)
- New markers: "NEW:kb-id..." (collection needs to be created via knowledge sync)
""" """
collections = set() collections = set()
# Liste von Entity-Types die xAI Collections haben können self._log(f"🔍 Checking relations of {entity_type} {document_id}...")
# NOTE: Erweiterbar für andere Entities
entity_types_with_collections = [
'CBeteiligte',
'Account',
'CVmhErstgespraech',
# Weitere Entity-Types hier hinzufügen
]
self._log(f"🔍 Prüfe Attachments von Document {document_id}...") # ═══════════════════════════════════════════════════════════════
# SPECIAL HANDLING: CAIKnowledge via Junction Table
# ═══════════════════════════════════════════════════════════════
try:
junction_entries = await self.espocrm.get_junction_entries(
'CAIKnowledgeCDokumente',
'cDokumenteId',
document_id
)
for entity_type in entity_types_with_collections: if junction_entries:
try: self._log(f" 📋 Found {len(junction_entries)} CAIKnowledge link(s)")
# Finde alle Entities dieses Typs die dieses Document attached haben
# EspoCRM API: Suche wo documentsIds das Document enthält
result = await self.espocrm.list_entities(
entity_type,
where=[{
'type': 'arrayAnyOf',
'attribute': 'documentsIds',
'value': [document_id]
}],
select='id,name,xaiCollectionId',
max_size=100
)
entities = result.get('list', []) for junction in junction_entries:
knowledge_id = junction.get('cAIKnowledgeId')
if not knowledge_id:
continue
if entities: try:
self._log(f"{len(entities)} {entity_type}(s) gefunden") knowledge = await self.espocrm.get_entity('CAIKnowledge', knowledge_id)
activation_status = knowledge.get('aktivierungsstatus')
collection_id = knowledge.get('datenbankId')
for entity in entities: if activation_status == 'active' and collection_id:
collection_id = entity.get('xaiCollectionId') # Existing collection - use it
if collection_id:
collections.add(collection_id) collections.add(collection_id)
self._log(f" {entity.get('name')}: Collection {collection_id[:16]}...") self._log(f" ✅ CAIKnowledge {knowledge_id}: {collection_id} (active)")
elif activation_status == 'new':
# Collection doesn't exist yet - return special marker
# Format: "NEW:{knowledge_id}" signals to caller: trigger knowledge sync first
collections.add(f"NEW:{knowledge_id}")
self._log(f" 🆕 CAIKnowledge {knowledge_id}: status='new' → collection must be created first")
else:
self._log(f" ⏭️ CAIKnowledge {knowledge_id}: status={activation_status}, datenbankId={collection_id or 'N/A'}")
except Exception as e: except Exception as e:
self._log(f" ⚠️ Fehler beim Prüfen von {entity_type}: {e}", level='warn') self._log(f" ⚠️ Failed to load CAIKnowledge {knowledge_id}: {e}", level='warn')
continue
except Exception as e:
self._log(f" ⚠️ Failed to check CAIKnowledge junction: {e}", level='warn')
result = list(collections) result = list(collections)
self._log(f"📊 Gesamt: {len(result)} eindeutige Collection(s) gefunden") self._log(f"📊 Gesamt: {len(result)} eindeutige Collection(s) gefunden")
@@ -341,6 +366,10 @@ class DocumentSync(BaseSyncUtils):
# Filename: Nutze dokumentName/fileName falls vorhanden, sonst aus Attachment # Filename: Nutze dokumentName/fileName falls vorhanden, sonst aus Attachment
final_filename = filename or attachment.get('name', 'unknown') final_filename = filename or attachment.get('name', 'unknown')
# URL-decode filename (fixes special chars like §, ä, ö, ü, etc.)
# EspoCRM stores filenames URL-encoded: %C2%A7 → §
final_filename = unquote(final_filename)
return { return {
'attachment_id': attachment_id, 'attachment_id': attachment_id,
'download_url': f"/api/v1/Attachment/file/{attachment_id}", 'download_url': f"/api/v1/Attachment/file/{attachment_id}",
@@ -468,10 +497,11 @@ class DocumentSync(BaseSyncUtils):
collection_ids: Optional[List[str]] = None, collection_ids: Optional[List[str]] = None,
file_hash: Optional[str] = None, file_hash: Optional[str] = None,
preview_data: Optional[bytes] = None, preview_data: Optional[bytes] = None,
reset_file_status: bool = False,
entity_type: str = 'CDokumente' entity_type: str = 'CDokumente'
) -> None: ) -> None:
""" """
Updated Document-Metadaten nach erfolgreichem xAI-Sync Updated Document-Metadaten nach erfolgreichem xAI-Sync oder Preview-Generierung
Args: Args:
document_id: EspoCRM Document ID document_id: EspoCRM Document ID
@@ -479,6 +509,7 @@ class DocumentSync(BaseSyncUtils):
collection_ids: Liste der xAI Collection IDs (optional) collection_ids: Liste der xAI Collection IDs (optional)
file_hash: MD5/SHA Hash des gesyncten Files file_hash: MD5/SHA Hash des gesyncten Files
preview_data: Vorschaubild (WebP) als bytes preview_data: Vorschaubild (WebP) als bytes
reset_file_status: Ob fileStatus/dateiStatus zurückgesetzt werden soll
entity_type: Entity-Type (CDokumente oder Document) entity_type: Entity-Type (CDokumente oder Document)
""" """
try: try:
@@ -495,13 +526,17 @@ class DocumentSync(BaseSyncUtils):
if collection_ids is not None: if collection_ids is not None:
update_data['xaiCollections'] = collection_ids update_data['xaiCollections'] = collection_ids
# Nur Status auf "Gesynct" setzen wenn xAI-File-ID vorhanden # fileStatus auf "unchanged" setzen wenn Dokument verarbeitet/clean ist
if xai_file_id: if reset_file_status:
# CDokumente verwendet fileStatus, Document verwendet dateiStatus
if entity_type == 'CDokumente': if entity_type == 'CDokumente':
update_data['fileStatus'] = 'synced' update_data['fileStatus'] = 'unchanged'
else: else:
update_data['dateiStatus'] = 'Gesynct' # Document Entity hat kein fileStatus, nur dateiStatus
update_data['dateiStatus'] = 'unchanged'
# xaiSyncStatus auf "clean" setzen wenn xAI-Sync erfolgreich war
if xai_file_id:
update_data['xaiSyncStatus'] = 'clean'
# Hash speichern für zukünftige Change Detection # Hash speichern für zukünftige Change Detection
if file_hash: if file_hash:

View File

@@ -2,21 +2,20 @@
import aiohttp import aiohttp
import asyncio import asyncio
import logging import logging
import redis import time
import os
from typing import Optional, Dict, Any, List from typing import Optional, Dict, Any, List
import os
logger = logging.getLogger(__name__) from services.exceptions import (
EspoCRMAPIError,
EspoCRMAuthError,
class EspoCRMError(Exception): EspoCRMTimeoutError,
"""Base exception for EspoCRM API errors""" RetryableError,
pass ValidationError
)
from services.redis_client import get_redis_client
class EspoCRMAuthError(EspoCRMError): from services.config import ESPOCRM_CONFIG, API_CONFIG
"""Authentication error""" from services.logging_utils import get_service_logger
pass
class EspoCRMAPI: class EspoCRMAPI:
@@ -32,7 +31,6 @@ class EspoCRMAPI:
- ESPOCRM_API_BASE_URL (e.g., https://crm.bitbylaw.com/api/v1) - ESPOCRM_API_BASE_URL (e.g., https://crm.bitbylaw.com/api/v1)
- ESPOCRM_API_KEY (Marvin API key) - ESPOCRM_API_KEY (Marvin API key)
- ESPOCRM_API_TIMEOUT_SECONDS (optional, default: 30) - ESPOCRM_API_TIMEOUT_SECONDS (optional, default: 30)
- REDIS_HOST, REDIS_PORT, REDIS_DB_ADVOWARE_CACHE (for caching)
""" """
def __init__(self, context=None): def __init__(self, context=None):
@@ -43,47 +41,38 @@ class EspoCRMAPI:
context: Motia FlowContext for logging (optional) context: Motia FlowContext for logging (optional)
""" """
self.context = context self.context = context
self._log("EspoCRMAPI initializing", level='debug') self.logger = get_service_logger('espocrm', context)
self.logger.debug("EspoCRMAPI initializing")
# Load configuration from environment # Load configuration from environment
self.api_base_url = os.getenv('ESPOCRM_API_BASE_URL', 'https://crm.bitbylaw.com/api/v1') self.api_base_url = os.getenv('ESPOCRM_API_BASE_URL', 'https://crm.bitbylaw.com/api/v1')
self.api_key = os.getenv('ESPOCRM_API_KEY', '') self.api_key = os.getenv('ESPOCRM_API_KEY', '')
self.api_timeout_seconds = int(os.getenv('ESPOCRM_API_TIMEOUT_SECONDS', '30')) self.api_timeout_seconds = int(os.getenv('ESPOCRM_API_TIMEOUT_SECONDS', str(API_CONFIG.default_timeout_seconds)))
if not self.api_key: if not self.api_key:
raise EspoCRMAuthError("ESPOCRM_API_KEY not configured in environment") raise EspoCRMAuthError("ESPOCRM_API_KEY not configured in environment")
self._log(f"EspoCRM API initialized with base URL: {self.api_base_url}") self.logger.info(f"EspoCRM API initialized with base URL: {self.api_base_url}")
# Optional Redis for caching/rate limiting self._session: Optional[aiohttp.ClientSession] = None
try: self._entity_defs_cache: Dict[str, Dict[str, Any]] = {}
redis_host = os.getenv('REDIS_HOST', 'localhost') self._entity_defs_cache_ttl_seconds = int(os.getenv('ESPOCRM_METADATA_TTL_SECONDS', '300'))
redis_port = int(os.getenv('REDIS_PORT', '6379'))
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
redis_timeout = int(os.getenv('REDIS_TIMEOUT_SECONDS', '5'))
self.redis_client = redis.Redis( # Metadata cache (complete metadata loaded once)
host=redis_host, self._metadata_cache: Optional[Dict[str, Any]] = None
port=redis_port, self._metadata_cache_ts: float = 0
db=redis_db,
socket_timeout=redis_timeout,
socket_connect_timeout=redis_timeout,
decode_responses=True
)
self.redis_client.ping()
self._log("Connected to Redis for EspoCRM operations")
except Exception as e:
self._log(f"Could not connect to Redis: {e}. Continuing without caching.", level='warning')
self.redis_client = None
def _log(self, message: str, level: str = 'info'): # Optional Redis for caching/rate limiting (centralized)
"""Log message via context.logger if available, otherwise use module logger""" self.redis_client = get_redis_client(strict=False)
if self.context and hasattr(self.context, 'logger'): if self.redis_client:
log_func = getattr(self.context.logger, level, self.context.logger.info) self.logger.info("Connected to Redis for EspoCRM operations")
log_func(f"[EspoCRM] {message}")
else: else:
log_func = getattr(logger, level, logger.info) self.logger.warning("⚠️ Redis unavailable - caching disabled")
log_func(f"[EspoCRM] {message}")
def _log(self, message: str, level: str = 'info') -> None:
"""Delegate to IntegrationLogger with optional level"""
log_func = getattr(self.logger, level, self.logger.info)
log_func(message)
def _get_headers(self) -> Dict[str, str]: def _get_headers(self) -> Dict[str, str]:
"""Generate request headers with API key""" """Generate request headers with API key"""
@@ -93,6 +82,86 @@ class EspoCRMAPI:
'Accept': 'application/json' 'Accept': 'application/json'
} }
async def _get_session(self) -> aiohttp.ClientSession:
if self._session is None or self._session.closed:
self._session = aiohttp.ClientSession()
return self._session
async def close(self) -> None:
if self._session and not self._session.closed:
await self._session.close()
async def get_metadata(self) -> Dict[str, Any]:
"""
Get complete EspoCRM metadata (cached).
Loads once and caches for TTL duration.
Much faster than individual entity def calls.
Returns:
Complete metadata dict with entityDefs, clientDefs, etc.
"""
now = time.monotonic()
# Return cached if still valid
if (self._metadata_cache is not None and
(now - self._metadata_cache_ts) < self._entity_defs_cache_ttl_seconds):
return self._metadata_cache
# Load fresh metadata
try:
self._log("📥 Loading complete EspoCRM metadata...", level='debug')
metadata = await self.api_call("/Metadata", method='GET')
if not isinstance(metadata, dict):
self._log("⚠️ Metadata response is not a dict, using empty", level='warn')
metadata = {}
# Cache it
self._metadata_cache = metadata
self._metadata_cache_ts = now
entity_count = len(metadata.get('entityDefs', {}))
self._log(f"✅ Metadata cached: {entity_count} entity definitions", level='debug')
return metadata
except Exception as e:
self._log(f"❌ Failed to load metadata: {e}", level='error')
# Return empty dict as fallback
return {}
async def get_entity_def(self, entity_type: str) -> Dict[str, Any]:
"""
Get entity definition for a specific entity type (cached via metadata).
Uses complete metadata cache - much faster and correct API usage.
Args:
entity_type: Entity type (e.g., 'Document', 'CDokumente', 'Account')
Returns:
Entity definition dict with fields, links, etc.
"""
try:
metadata = await self.get_metadata()
entity_defs = metadata.get('entityDefs', {})
if not isinstance(entity_defs, dict):
self._log(f"⚠️ entityDefs is not a dict for {entity_type}", level='warn')
return {}
entity_def = entity_defs.get(entity_type, {})
if not entity_def:
self._log(f"⚠️ No entity definition found for '{entity_type}'", level='debug')
return entity_def
except Exception as e:
self._log(f"⚠️ Could not load entity def for {entity_type}: {e}", level='warn')
return {}
async def api_call( async def api_call(
self, self,
endpoint: str, endpoint: str,
@@ -115,7 +184,9 @@ class EspoCRMAPI:
Parsed JSON response or None Parsed JSON response or None
Raises: Raises:
EspoCRMError: On API errors EspoCRMAuthError: Authentication failed
EspoCRMTimeoutError: Request timed out
EspoCRMAPIError: Other API errors
""" """
# Ensure endpoint starts with / # Ensure endpoint starts with /
if not endpoint.startswith('/'): if not endpoint.startswith('/'):
@@ -127,45 +198,62 @@ class EspoCRMAPI:
total=timeout_seconds or self.api_timeout_seconds total=timeout_seconds or self.api_timeout_seconds
) )
self._log(f"API call: {method} {url}", level='debug') session = await self._get_session()
if params: try:
self._log(f"Params: {params}", level='debug') with self.logger.api_call(endpoint, method):
async with aiohttp.ClientSession(timeout=effective_timeout) as session:
try:
async with session.request( async with session.request(
method, method,
url, url,
headers=headers, headers=headers,
params=params, params=params,
json=json_data json=json_data,
timeout=effective_timeout
) as response: ) as response:
# Log response status
self._log(f"Response status: {response.status}", level='debug')
# Handle errors # Handle errors
if response.status == 401: if response.status == 401:
raise EspoCRMAuthError("Authentication failed - check API key") raise EspoCRMAuthError(
"Authentication failed - check API key",
status_code=401
)
elif response.status == 403: elif response.status == 403:
raise EspoCRMError("Access forbidden") raise EspoCRMAPIError(
"Access forbidden",
status_code=403
)
elif response.status == 404: elif response.status == 404:
raise EspoCRMError(f"Resource not found: {endpoint}") raise EspoCRMAPIError(
f"Resource not found: {endpoint}",
status_code=404
)
elif response.status >= 500:
error_text = await response.text()
raise RetryableError(
f"Server error {response.status}: {error_text}"
)
elif response.status >= 400: elif response.status >= 400:
error_text = await response.text() error_text = await response.text()
raise EspoCRMError(f"API error {response.status}: {error_text}") raise EspoCRMAPIError(
f"API error {response.status}: {error_text}",
status_code=response.status,
response_body=error_text
)
# Parse response # Parse response
if response.content_type == 'application/json': if response.content_type == 'application/json':
result = await response.json() result = await response.json()
self._log(f"Response received", level='debug')
return result return result
else: else:
# For DELETE or other non-JSON responses # For DELETE or other non-JSON responses
return None return None
except aiohttp.ClientError as e: except asyncio.TimeoutError:
self._log(f"API call failed: {e}", level='error') raise EspoCRMTimeoutError(
raise EspoCRMError(f"Request failed: {e}") from e f"Request timed out after {effective_timeout.total}s",
status_code=408
)
except aiohttp.ClientError as e:
self.logger.error(f"API call failed: {e}")
raise EspoCRMAPIError(f"Request failed: {str(e)}")
async def get_entity(self, entity_type: str, entity_id: str) -> Dict[str, Any]: async def get_entity(self, entity_type: str, entity_id: str) -> Dict[str, Any]:
""" """
@@ -221,6 +309,36 @@ class EspoCRMAPI:
self._log(f"Listing {entity_type} entities") self._log(f"Listing {entity_type} entities")
return await self.api_call(f"/{entity_type}", method='GET', params=params) return await self.api_call(f"/{entity_type}", method='GET', params=params)
async def list_related(
self,
entity_type: str,
entity_id: str,
link: str,
where: Optional[List[Dict]] = None,
select: Optional[str] = None,
order_by: Optional[str] = None,
order: Optional[str] = None,
offset: int = 0,
max_size: int = 50
) -> Dict[str, Any]:
params = {
'offset': offset,
'maxSize': max_size
}
if where:
import json
params['where'] = where if isinstance(where, str) else json.dumps(where)
if select:
params['select'] = select
if order_by:
params['orderBy'] = order_by
if order:
params['order'] = order
self._log(f"Listing related {entity_type}/{entity_id}/{link}")
return await self.api_call(f"/{entity_type}/{entity_id}/{link}", method='GET', params=params)
async def create_entity( async def create_entity(
self, self,
entity_type: str, entity_type: str,
@@ -345,36 +463,36 @@ class EspoCRMAPI:
effective_timeout = aiohttp.ClientTimeout(total=self.api_timeout_seconds) effective_timeout = aiohttp.ClientTimeout(total=self.api_timeout_seconds)
async with aiohttp.ClientSession(timeout=effective_timeout) as session: session = await self._get_session()
try: try:
async with session.post(url, headers=headers, data=form_data) as response: async with session.post(url, headers=headers, data=form_data, timeout=effective_timeout) as response:
self._log(f"Upload response status: {response.status}") self._log(f"Upload response status: {response.status}")
if response.status == 401: if response.status == 401:
raise EspoCRMAuthError("Authentication failed - check API key") raise EspoCRMAuthError("Authentication failed - check API key")
elif response.status == 403: elif response.status == 403:
raise EspoCRMError("Access forbidden") raise EspoCRMError("Access forbidden")
elif response.status == 404: elif response.status == 404:
raise EspoCRMError(f"Attachment endpoint not found") raise EspoCRMError(f"Attachment endpoint not found")
elif response.status >= 400: elif response.status >= 400:
error_text = await response.text() error_text = await response.text()
self._log(f"❌ Upload failed with {response.status}. Response: {error_text}", level='error') self._log(f"❌ Upload failed with {response.status}. Response: {error_text}", level='error')
raise EspoCRMError(f"Upload error {response.status}: {error_text}") raise EspoCRMError(f"Upload error {response.status}: {error_text}")
# Parse response # Parse response
if response.content_type == 'application/json': if response.content_type == 'application/json':
result = await response.json() result = await response.json()
attachment_id = result.get('id') attachment_id = result.get('id')
self._log(f"✅ Attachment uploaded successfully: {attachment_id}") self._log(f"✅ Attachment uploaded successfully: {attachment_id}")
return result return result
else: else:
response_text = await response.text() response_text = await response.text()
self._log(f"⚠️ Non-JSON response: {response_text[:200]}", level='warn') self._log(f"⚠️ Non-JSON response: {response_text[:200]}", level='warn')
return {'success': True, 'response': response_text} return {'success': True, 'response': response_text}
except aiohttp.ClientError as e: except aiohttp.ClientError as e:
self._log(f"Upload failed: {e}", level='error') self._log(f"Upload failed: {e}", level='error')
raise EspoCRMError(f"Upload request failed: {e}") from e raise EspoCRMError(f"Upload request failed: {e}") from e
async def download_attachment(self, attachment_id: str) -> bytes: async def download_attachment(self, attachment_id: str) -> bytes:
""" """
@@ -395,23 +513,219 @@ class EspoCRMAPI:
effective_timeout = aiohttp.ClientTimeout(total=self.api_timeout_seconds) effective_timeout = aiohttp.ClientTimeout(total=self.api_timeout_seconds)
async with aiohttp.ClientSession(timeout=effective_timeout) as session: session = await self._get_session()
try: try:
async with session.get(url, headers=headers) as response: async with session.get(url, headers=headers, timeout=effective_timeout) as response:
if response.status == 401: if response.status == 401:
raise EspoCRMAuthError("Authentication failed - check API key") raise EspoCRMAuthError("Authentication failed - check API key")
elif response.status == 403: elif response.status == 403:
raise EspoCRMError("Access forbidden") raise EspoCRMError("Access forbidden")
elif response.status == 404: elif response.status == 404:
raise EspoCRMError(f"Attachment not found: {attachment_id}") raise EspoCRMError(f"Attachment not found: {attachment_id}")
elif response.status >= 400: elif response.status >= 400:
error_text = await response.text() error_text = await response.text()
raise EspoCRMError(f"Download error {response.status}: {error_text}") raise EspoCRMError(f"Download error {response.status}: {error_text}")
content = await response.read() content = await response.read()
self._log(f"✅ Downloaded {len(content)} bytes") self._log(f"✅ Downloaded {len(content)} bytes")
return content return content
except aiohttp.ClientError as e: except aiohttp.ClientError as e:
self._log(f"Download failed: {e}", level='error') self._log(f"Download failed: {e}", level='error')
raise EspoCRMError(f"Download request failed: {e}") from e raise EspoCRMError(f"Download request failed: {e}") from e
# ========== Junction Table Operations ==========
async def get_junction_entries(
    self,
    junction_entity: str,
    filter_field: str,
    filter_value: str,
    max_size: int = 1000
) -> List[Dict[str, Any]]:
    """
    Fetch junction-table rows matching a single equals-filter.

    Args:
        junction_entity: Junction entity name (e.g. 'CAIKnowledgeCDokumente')
        filter_field: Attribute to filter on (e.g. 'cAIKnowledgeId')
        filter_value: Value the attribute must equal
        max_size: Upper bound on returned rows

    Returns:
        All matching junction records, including every additionalColumn.

    Example:
        entries = await espocrm.get_junction_entries(
            'CAIKnowledgeCDokumente', 'cAIKnowledgeId', 'kb-123'
        )
    """
    self._log(f"Loading junction entries: {junction_entity} where {filter_field}={filter_value}")
    equals_filter = {
        'type': 'equals',
        'attribute': filter_field,
        'value': filter_value
    }
    response = await self.list_entities(
        junction_entity,
        where=[equals_filter],
        max_size=max_size
    )
    records = response.get('list', [])
    self._log(f"✅ Loaded {len(records)} junction entries")
    return records
async def update_junction_entry(
    self,
    junction_entity: str,
    junction_id: str,
    fields: Dict[str, Any]
) -> None:
    """
    Write fields on one junction-table row.

    Thin convenience wrapper around update_entity().

    Args:
        junction_entity: Junction entity name
        junction_id: ID of the junction record
        fields: Field values to persist

    Example:
        await espocrm.update_junction_entry(
            'CAIKnowledgeCDokumente',
            'jct-123',
            {'syncstatus': 'synced', 'lastSync': '2026-03-11T20:00:00Z'}
        )
    """
    await self.update_entity(junction_entity, junction_id, fields)
async def get_knowledge_documents_with_junction(
    self,
    knowledge_id: str
) -> List[Dict[str, Any]]:
    """
    Get all documents linked to a CAIKnowledge entry together with their
    junction data, in one call.

    Uses the custom gateway endpoint:
        GET /JunctionData/CAIKnowledge/{knowledge_id}/dokumentes

    Each returned dict combines junction columns (junctionId,
    cAIKnowledgeId, cDokumenteId, aiDocumentId, syncstatus, lastSync)
    with document info (documentId, documentName, blake3hash,
    documentCreatedAt, documentModifiedAt) — the exact field set is
    defined by the gateway; verify against its response schema.

    Args:
        knowledge_id: CAIKnowledge entity ID

    Returns:
        List of document dicts with junction data; empty list on HTTP 404
        (knowledge base unknown or no documents linked).

    Raises:
        EspoCRMTimeoutError: When the request exceeds api_timeout_seconds.
        EspoCRMAPIError: On HTTP >= 400 (other than 404) or network errors.

    Example:
        docs = await espocrm.get_knowledge_documents_with_junction('69b1b03582bb6e2da')
        for doc in docs:
            print(f"{doc['documentName']}: {doc['syncstatus']}")
    """
    # JunctionData is served by the API gateway, not by EspoCRM directly.
    gateway_url = os.getenv('ESPOCRM_GATEWAY_URL', 'https://api.bitbylaw.com/vmh/crm')
    url = f"{gateway_url}/JunctionData/CAIKnowledge/{knowledge_id}/dokumentes"
    self._log(f"GET {url}")
    try:
        session = await self._get_session()
        timeout = aiohttp.ClientTimeout(total=self.api_timeout_seconds)
        async with session.get(url, headers=self._get_headers(), timeout=timeout) as response:
            self._log(f"Response status: {response.status}")
            if response.status == 404:
                # Knowledge base not found or no documents linked
                return []
            if response.status >= 400:
                error_text = await response.text()
                raise EspoCRMAPIError(f"JunctionData GET failed: {response.status} - {error_text}")
            result = await response.json()
            documents = result.get('list', [])
            self._log(f"✅ Loaded {len(documents)} document(s) with junction data")
            return documents
    except asyncio.TimeoutError as e:
        # Chain the cause (consistent with download/upload error handling).
        raise EspoCRMTimeoutError(f"Timeout getting junction data for knowledge {knowledge_id}") from e
    except aiohttp.ClientError as e:
        raise EspoCRMAPIError(f"Network error getting junction data: {e}") from e
async def update_knowledge_document_junction(
    self,
    knowledge_id: str,
    document_id: str,
    fields: Dict[str, Any],
    update_last_sync: bool = True
) -> Dict[str, Any]:
    """
    Update junction columns for one knowledge↔document link.

    Uses the custom gateway endpoint:
        PUT /JunctionData/CAIKnowledge/{knowledge_id}/dokumentes/{document_id}

    Args:
        knowledge_id: CAIKnowledge entity ID
        document_id: CDokumente entity ID
        fields: Junction fields to update (aiDocumentId, syncstatus, ...)
        update_last_sync: Also bump the lastSync timestamp (default: True)

    Returns:
        The updated junction data as returned by the gateway.

    Raises:
        EspoCRMTimeoutError: When the request exceeds api_timeout_seconds.
        EspoCRMAPIError: On HTTP >= 400 or network errors.

    Example:
        await espocrm.update_knowledge_document_junction(
            '69b1b03582bb6e2da',
            '69a68b556a39771bf',
            {'aiDocumentId': 'xai-file-abc123', 'syncstatus': 'synced'},
            update_last_sync=True
        )
    """
    # JunctionData is served by the API gateway, not by EspoCRM directly.
    gateway_url = os.getenv('ESPOCRM_GATEWAY_URL', 'https://api.bitbylaw.com/vmh/crm')
    url = f"{gateway_url}/JunctionData/CAIKnowledge/{knowledge_id}/dokumentes/{document_id}"
    payload = {**fields}
    if update_last_sync:
        # The gateway interprets this flag and stamps lastSync server-side.
        payload['updateLastSync'] = True
    self._log(f"PUT {url}")
    self._log(f"  Payload: {payload}")
    try:
        session = await self._get_session()
        timeout = aiohttp.ClientTimeout(total=self.api_timeout_seconds)
        async with session.put(url, headers=self._get_headers(), json=payload, timeout=timeout) as response:
            self._log(f"Response status: {response.status}")
            if response.status >= 400:
                error_text = await response.text()
                raise EspoCRMAPIError(f"JunctionData PUT failed: {response.status} - {error_text}")
            result = await response.json()
            self._log(f"✅ Junction updated: junctionId={result.get('junctionId')}")
            return result
    except asyncio.TimeoutError as e:
        # Chain the cause (consistent with download/upload error handling).
        raise EspoCRMTimeoutError("Timeout updating junction data") from e
    except aiohttp.ClientError as e:
        raise EspoCRMAPIError(f"Network error updating junction data: {e}") from e

View File

@@ -8,7 +8,15 @@ from typing import Dict, Any, Optional, List
from datetime import datetime from datetime import datetime
import logging import logging
logger = logging.getLogger(__name__) from services.models import (
AdvowareBeteiligteCreate,
AdvowareBeteiligteUpdate,
EspoCRMBeteiligteCreate,
validate_beteiligte_advoware,
validate_beteiligte_espocrm
)
from services.exceptions import ValidationError
from services.config import FEATURE_FLAGS
class BeteiligteMapper: class BeteiligteMapper:
@@ -27,6 +35,9 @@ class BeteiligteMapper:
Returns: Returns:
Dict mit Stammdaten für Advoware API (POST/PUT /api/v1/advonet/Beteiligte) Dict mit Stammdaten für Advoware API (POST/PUT /api/v1/advonet/Beteiligte)
Raises:
ValidationError: Bei Validierungsfehlern (wenn strict_validation aktiviert)
""" """
logger.debug(f"Mapping EspoCRM → Advoware STAMMDATEN: {espo_entity.get('id')}") logger.debug(f"Mapping EspoCRM → Advoware STAMMDATEN: {espo_entity.get('id')}")
@@ -78,6 +89,14 @@ class BeteiligteMapper:
logger.debug(f"Mapped to Advoware STAMMDATEN: name={advo_data.get('name')}, vorname={advo_data.get('vorname')}, rechtsform={advo_data.get('rechtsform')}") logger.debug(f"Mapped to Advoware STAMMDATEN: name={advo_data.get('name')}, vorname={advo_data.get('vorname')}, rechtsform={advo_data.get('rechtsform')}")
# Optional: Validiere mit Pydantic wenn aktiviert
if FEATURE_FLAGS.strict_validation:
try:
validate_beteiligte_advoware(advo_data)
except ValidationError as e:
logger.warning(f"Validation warning: {e}")
# Continue anyway - validation ist optional
return advo_data return advo_data
@staticmethod @staticmethod

217
services/exceptions.py Normal file
View File

@@ -0,0 +1,217 @@
"""
Custom Exception Classes für BitByLaw Integration
Hierarchie:
- IntegrationError (Base)
- APIError
- AdvowareAPIError
- AdvowareAuthError
- AdvowareTimeoutError
- EspoCRMAPIError
- EspoCRMAuthError
- EspoCRMTimeoutError
- SyncError
- LockAcquisitionError
- ValidationError
- ConflictError
- RetryableError
- NonRetryableError
"""
from typing import Optional, Dict, Any
class IntegrationError(Exception):
    """Root of the integration exception hierarchy.

    Carries a human-readable message plus a structured ``details`` dict
    for logging and diagnostics.
    """

    def __init__(self, message: str, details: Optional[Dict[str, Any]] = None):
        super().__init__(message)
        self.message = message
        # Never None: callers may update/inspect without guarding.
        self.details = details or {}
# ========== API Errors ==========
class APIError(IntegrationError):
    """Common base for HTTP/API failures from external systems.

    Records the HTTP status code and raw response body when known.
    """

    def __init__(
        self,
        message: str,
        status_code: Optional[int] = None,
        response_body: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None
    ):
        super().__init__(message, details)
        self.status_code = status_code
        self.response_body = response_body
class AdvowareAPIError(APIError):
    """Error raised by the Advoware API layer."""
class AdvowareAuthError(AdvowareAPIError):
    """Authentication against Advoware failed."""
class AdvowareTimeoutError(AdvowareAPIError):
    """An Advoware API request timed out."""
class EspoCRMAPIError(APIError):
    """Error raised by the EspoCRM API layer."""
class EspoCRMAuthError(EspoCRMAPIError):
    """Authentication against EspoCRM failed."""
class EspoCRMTimeoutError(EspoCRMAPIError):
    """An EspoCRM API request timed out."""
# ========== Sync Errors ==========
class SyncError(IntegrationError):
    """Common base for synchronization failures."""
class LockAcquisitionError(SyncError):
    """Raised when a distributed lock could not be acquired."""

    def __init__(self, entity_id: str, lock_key: str, message: Optional[str] = None):
        default_message = f"Could not acquire lock for entity {entity_id}"
        super().__init__(
            message or default_message,
            details={"entity_id": entity_id, "lock_key": lock_key}
        )
        self.entity_id = entity_id
        self.lock_key = lock_key
class ValidationError(SyncError):
    """Raised when data fails validation.

    Optionally records the offending field name and value.
    """

    def __init__(self, message: str, field: Optional[str] = None, value: Any = None):
        super().__init__(message, details={"field": field, "value": value})
        self.field = field
        self.value = value
class ConflictError(SyncError):
    """Raised when the same entity changed in both systems during sync."""

    def __init__(
        self,
        message: str,
        entity_id: str,
        source_system: Optional[str] = None,
        target_system: Optional[str] = None
    ):
        conflict_details = {
            "entity_id": entity_id,
            "source_system": source_system,
            "target_system": target_system
        }
        super().__init__(message, details=conflict_details)
        self.entity_id = entity_id
# ========== Retry Classification ==========
class RetryableError(IntegrationError):
    """An error for which retrying is expected to help."""

    def __init__(
        self,
        message: str,
        retry_after_seconds: Optional[int] = None,
        details: Optional[Dict[str, Any]] = None
    ):
        super().__init__(message, details)
        # Optional hint consumed by get_retry_delay().
        self.retry_after_seconds = retry_after_seconds
class NonRetryableError(IntegrationError):
    """An error that must NOT be retried (e.g. validation failures)."""
# ========== Redis Errors ==========
class RedisError(IntegrationError):
    """Redis connection or operation failure."""

    def __init__(self, message: str, operation: Optional[str] = None):
        super().__init__(message, details={"operation": operation})
        # Name of the Redis operation that failed, if known.
        self.operation = operation
class RedisConnectionError(RedisError):
    """Connecting to Redis failed."""
# ========== Helper Functions ==========
def is_retryable(error: Exception) -> bool:
    """
    Decide whether *error* should trigger the retry logic.

    Precedence: an explicit NonRetryableError always wins, then explicit
    RetryableError and timeouts, then validation failures; any remaining
    APIError is assumed transient (retryable). Everything else is not.

    Args:
        error: Exception to classify

    Returns:
        True if the error is retryable
    """
    if isinstance(error, NonRetryableError):
        return False
    if isinstance(error, (RetryableError, AdvowareTimeoutError, EspoCRMTimeoutError)):
        return True
    if isinstance(error, ValidationError):
        return False
    # Default for API errors: assume a transient failure.
    return isinstance(error, APIError)
def get_retry_delay(error: Exception, attempt: int) -> int:
    """
    Compute the delay before the next retry.

    A RetryableError may dictate its own delay via retry_after_seconds;
    otherwise an exponential backoff schedule of [1, 5, 15, 60, 240]
    minutes is applied, capped at the last entry.

    Args:
        error: The error that occurred
        attempt: Current retry attempt (0-indexed)

    Returns:
        Delay in seconds
    """
    if isinstance(error, RetryableError) and error.retry_after_seconds:
        return error.retry_after_seconds
    backoff_minutes = [1, 5, 15, 60, 240]
    step = min(attempt, len(backoff_minutes) - 1)
    return backoff_minutes[step] * 60

View File

@@ -24,8 +24,6 @@ from services.kommunikation_mapper import (
from services.advoware_service import AdvowareService from services.advoware_service import AdvowareService
from services.espocrm import EspoCRMAPI from services.espocrm import EspoCRMAPI
logger = logging.getLogger(__name__)
class KommunikationSyncManager: class KommunikationSyncManager:
"""Manager für Kommunikation-Synchronisation""" """Manager für Kommunikation-Synchronisation"""

View File

@@ -0,0 +1,218 @@
"""LangChain xAI Integration Service
Service für LangChain ChatXAI Integration mit File Search Binding.
Analog zu xai_service.py für xAI Files API.
"""
import os
from typing import Dict, List, Any, Optional, AsyncIterator
from services.logging_utils import get_service_logger
class LangChainXAIService:
    """
    Wrapper around LangChain's ChatXAI with Motia integration.

    Required environment variables:
        - XAI_API_KEY: API key for xAI (used by the ChatXAI model)

    Usage:
        service = LangChainXAIService(ctx)
        model = service.get_chat_model(model="grok-4-1-fast-reasoning")
        model_with_tools = service.bind_file_search(model, collection_id)
        result = await service.invoke_chat(model_with_tools, messages)
    """

    def __init__(self, ctx=None):
        """
        Initialize the service.

        Args:
            ctx: Optional Motia context used for logging

        Raises:
            ValueError: If XAI_API_KEY is not configured
        """
        self.api_key = os.getenv('XAI_API_KEY', '')
        self.ctx = ctx
        self.logger = get_service_logger('langchain_xai', ctx)
        if not self.api_key:
            raise ValueError("XAI_API_KEY not configured in environment")

    def _log(self, msg: str, level: str = 'info') -> None:
        """Forward to the service logger; unknown level names fall back to info."""
        log_func = getattr(self.logger, level, self.logger.info)
        log_func(msg)

    def get_chat_model(
        self,
        model: str = "grok-4-1-fast-reasoning",
        temperature: float = 0.7,
        max_tokens: Optional[int] = None
    ):
        """
        Create a ChatXAI model instance.

        Args:
            model: Model name (default: grok-4-1-fast-reasoning)
            temperature: Sampling temperature 0.0-1.0
            max_tokens: Optional response token cap (omitted when falsy)

        Returns:
            ChatXAI model instance

        Raises:
            ImportError: If langchain_xai is not installed
        """
        # Imported lazily so the service can be constructed even when the
        # optional langchain-xai dependency is absent.
        try:
            from langchain_xai import ChatXAI
        except ImportError:
            raise ImportError(
                "langchain_xai not installed. "
                "Run: pip install langchain-xai>=0.2.0"
            )
        self._log(f"🤖 Initializing ChatXAI: model={model}, temp={temperature}")
        kwargs = {
            "model": model,
            "api_key": self.api_key,
            "temperature": temperature
        }
        if max_tokens:
            kwargs["max_tokens"] = max_tokens
        return ChatXAI(**kwargs)

    def bind_tools(
        self,
        model,
        collection_id: Optional[str] = None,
        enable_web_search: bool = False,
        web_search_config: Optional[Dict[str, Any]] = None,
        max_num_results: int = 10
    ):
        """
        Bind xAI tools (file_search and/or web_search) to the model.

        Args:
            model: ChatXAI model instance
            collection_id: Optional xAI collection ID enabling file_search
            enable_web_search: Enable the web_search tool (default: False)
            web_search_config: Optional web search configuration:
                {
                    'allowed_domains': ['example.com'],   # max 5 domains
                    'excluded_domains': ['spam.com'],     # max 5 domains
                    'enable_image_understanding': True
                }
            max_num_results: Max results from file search (default: 10)

        Returns:
            Model with the requested tools bound, or the unmodified model
            when no tool was requested.
        """
        tools = []
        # Add file_search tool if collection_id provided
        if collection_id:
            self._log(f"🔍 Binding file_search: collection={collection_id}")
            tools.append({
                "type": "file_search",
                "vector_store_ids": [collection_id],
                "max_num_results": max_num_results
            })
        # Add web_search tool if enabled
        if enable_web_search:
            self._log("🌐 Binding web_search")
            web_search_tool = {"type": "web_search"}
            # Add optional web search filters
            if web_search_config:
                # NOTE(review): allowed_domains takes precedence; excluded_domains
                # is silently ignored when both are supplied — presumably because
                # xAI treats the two filters as mutually exclusive. TODO confirm.
                if 'allowed_domains' in web_search_config:
                    domains = web_search_config['allowed_domains'][:5]  # Max 5
                    web_search_tool['filters'] = {'allowed_domains': domains}
                    self._log(f"  Allowed domains: {domains}")
                elif 'excluded_domains' in web_search_config:
                    domains = web_search_config['excluded_domains'][:5]  # Max 5
                    web_search_tool['filters'] = {'excluded_domains': domains}
                    self._log(f"  Excluded domains: {domains}")
                if web_search_config.get('enable_image_understanding'):
                    web_search_tool['enable_image_understanding'] = True
                    self._log("  Image understanding: enabled")
            tools.append(web_search_tool)
        if not tools:
            self._log("⚠️ No tools to bind (no collection_id and web_search disabled)", level='warn')
            return model
        self._log(f"🔧 Binding {len(tools)} tool(s) to model")
        return model.bind_tools(tools)

    def bind_file_search(
        self,
        model,
        collection_id: str,
        max_num_results: int = 10
    ):
        """
        Legacy helper: bind only the file_search tool.

        Prefer bind_tools() for new code — it also supports web_search.
        """
        return self.bind_tools(
            model=model,
            collection_id=collection_id,
            max_num_results=max_num_results
        )

    async def invoke_chat(
        self,
        model,
        messages: List[Dict[str, Any]]
    ) -> Any:
        """
        Non-streaming chat completion.

        Args:
            model: ChatXAI model (with or without tools bound)
            messages: List of message dicts [{"role": "user", "content": "..."}]

        Returns:
            LangChain AIMessage with the response

        Raises:
            Exception: Propagated from the underlying API call
        """
        self._log(f"💬 Invoking chat: {len(messages)} messages", level='debug')
        result = await model.ainvoke(messages)
        self._log(f"✅ Response received: {len(result.content)} chars", level='debug')
        return result

    async def astream_chat(
        self,
        model,
        messages: List[Dict[str, Any]]
    ) -> AsyncIterator:
        """
        Streaming chat completion.

        Args:
            model: ChatXAI model (with or without tools bound)
            messages: List of message dicts

        Yields:
            Chunks from the streaming response

        Example:
            async for chunk in service.astream_chat(model, messages):
                delta = chunk.content if hasattr(chunk, "content") else ""
                # Process delta...
        """
        self._log(f"💬 Streaming chat: {len(messages)} messages", level='debug')
        async for chunk in model.astream(messages):
            yield chunk

416
services/logging_utils.py Normal file
View File

@@ -0,0 +1,416 @@
"""
Konsistenter Logging Wrapper für BitByLaw Integration
Vereinheitlicht Logging über:
- Standard Python Logger
- Motia FlowContext Logger
- Structured Logging
Usage Guidelines:
=================
FOR SERVICES: Use get_service_logger('service_name', context)
-----------------------------------------------------------------
Example:
from services.logging_utils import get_service_logger
class XAIService:
def __init__(self, ctx=None):
self.logger = get_service_logger('xai', ctx)
def upload(self):
self.logger.info("Uploading file...")
FOR STEPS: Use ctx.logger directly (preferred)
-----------------------------------------------------------------
Steps already have ctx.logger available - use it directly:
async def handler(event_data, ctx: FlowContext):
ctx.logger.info("Processing event")
Alternative: Use get_step_logger() for additional loggers:
step_logger = get_step_logger('beteiligte_sync', ctx)
FOR SYNC UTILS: Inherit from BaseSyncUtils (provides self.logger)
-----------------------------------------------------------------
from services.sync_utils_base import BaseSyncUtils
class MySync(BaseSyncUtils):
def __init__(self, espocrm, redis, context):
super().__init__(espocrm, redis, context)
# self.logger is now available
def sync(self):
self._log("Syncing...", level='info')
FOR STANDALONE UTILITIES: Use get_logger()
-----------------------------------------------------------------
from services.logging_utils import get_logger
logger = get_logger('my_module', context)
logger.info("Processing...")
CONSISTENCY RULES:
==================
✅ Services: get_service_logger('service_name', ctx)
✅ Steps: ctx.logger (direct) or get_step_logger('step_name', ctx)
✅ Sync Utils: Inherit from BaseSyncUtils → use self._log() or self.logger
✅ Standalone: get_logger('module_name', ctx)
❌ DO NOT: Use module-level logging.getLogger(__name__)
❌ DO NOT: Mix get_logger() and get_service_logger() in same module
"""
import logging
import time
from typing import Optional, Any, Dict
from contextlib import contextmanager
from datetime import datetime
class IntegrationLogger:
    """
    Unified logger supporting:
    - Motia FlowContext
    - Standard Python logging
    - Structured logging (key=value suffixes)
    - Performance tracking via context managers
    """

    def __init__(
        self,
        name: str,
        context: Optional[Any] = None,
        extra_fields: Optional[Dict[str, Any]] = None
    ):
        """
        Initialize the logger.

        Args:
            name: Logger name (e.g. 'advoware.api')
            context: Optional Motia FlowContext
            extra_fields: Optional fields appended to every message
        """
        self.name = name
        self.context = context
        self.extra_fields = extra_fields or {}
        self._standard_logger = logging.getLogger(name)

    def _format_message(self, message: str, **kwargs) -> str:
        """
        Append structured fields to a message as " | k=v | k=v".

        Args:
            message: Base message
            **kwargs: Per-call extra fields (merged over self.extra_fields)

        Returns:
            Formatted message
        """
        if not kwargs and not self.extra_fields:
            return message
        # Merge instance-level context with per-call fields (call wins).
        fields = {**self.extra_fields, **kwargs}
        if fields:
            field_str = " | ".join(f"{k}={v}" for k, v in fields.items())
            return f"{message} | {field_str}"
        return message

    def _log(
        self,
        level: str,
        message: str,
        exc_info: bool = False,
        **kwargs
    ) -> None:
        """
        Internal dispatch: emit to the FlowContext logger (best effort)
        AND always to the standard Python logger.

        Args:
            level: Log level name (debug, info, warning, error, critical)
            message: Log message
            exc_info: Include exception info (standard logger only)
            **kwargs: Extra fields for structured logging
        """
        formatted_msg = self._format_message(message, **kwargs)
        # Log to FlowContext if available
        # NOTE(review): if the context logger lacks the level name (e.g. exposes
        # 'warn' instead of 'warning'), getattr falls back to its info method.
        if self.context and hasattr(self.context, 'logger'):
            try:
                log_func = getattr(self.context.logger, level, self.context.logger.info)
                log_func(formatted_msg)
            except Exception:
                # Swallow context-logger failures: the standard logger below
                # still records the message.
                pass
        # Always log to standard Python logger
        log_func = getattr(self._standard_logger, level, self._standard_logger.info)
        log_func(formatted_msg, exc_info=exc_info)

    def debug(self, message: str, **kwargs) -> None:
        """Log debug message"""
        self._log('debug', message, **kwargs)

    def info(self, message: str, **kwargs) -> None:
        """Log info message"""
        self._log('info', message, **kwargs)

    def warning(self, message: str, **kwargs) -> None:
        """Log warning message"""
        self._log('warning', message, **kwargs)

    def warn(self, message: str, **kwargs) -> None:
        """Alias for warning"""
        self.warning(message, **kwargs)

    def error(self, message: str, exc_info: bool = True, **kwargs) -> None:
        """Log error message (with exception info by default)"""
        self._log('error', message, exc_info=exc_info, **kwargs)

    def critical(self, message: str, exc_info: bool = True, **kwargs) -> None:
        """Log critical message"""
        self._log('critical', message, exc_info=exc_info, **kwargs)

    def exception(self, message: str, **kwargs) -> None:
        """Log exception with traceback"""
        self._log('error', message, exc_info=True, **kwargs)

    @contextmanager
    def operation(self, operation_name: str, **context_fields):
        """
        Context manager for operations with automatic timing.

        Logs a start line, then either a completion or a failure line with
        the measured duration; failures are re-raised.

        Args:
            operation_name: Name of the operation
            **context_fields: Context fields appended to all three log lines

        Example:
            with logger.operation('sync_beteiligte', entity_id='123'):
                # Do sync
                pass
        """
        start_time = time.time()
        self.info(f"▶️ Starting: {operation_name}", **context_fields)
        try:
            yield
            duration_ms = int((time.time() - start_time) * 1000)
            self.info(
                f"✅ Completed: {operation_name}",
                duration_ms=duration_ms,
                **context_fields
            )
        except Exception as e:
            duration_ms = int((time.time() - start_time) * 1000)
            self.error(
                f"❌ Failed: {operation_name} - {str(e)}",
                duration_ms=duration_ms,
                error_type=type(e).__name__,
                **context_fields
            )
            raise

    @contextmanager
    def api_call(self, endpoint: str, method: str = 'GET', **context_fields):
        """
        Context manager specifically for API calls.

        Like operation(), but logs at debug level on success and includes
        the HTTP method and endpoint.

        Args:
            endpoint: API endpoint
            method: HTTP method
            **context_fields: Extra context

        Example:
            with logger.api_call('/api/v1/Beteiligte', method='POST'):
                result = await api.post(...)
        """
        start_time = time.time()
        self.debug(f"API Call: {method} {endpoint}", **context_fields)
        try:
            yield
            duration_ms = int((time.time() - start_time) * 1000)
            self.debug(
                f"API Success: {method} {endpoint}",
                duration_ms=duration_ms,
                **context_fields
            )
        except Exception as e:
            duration_ms = int((time.time() - start_time) * 1000)
            self.error(
                f"API Error: {method} {endpoint} - {str(e)}",
                duration_ms=duration_ms,
                error_type=type(e).__name__,
                **context_fields
            )
            raise

    def with_context(self, **extra_fields) -> 'IntegrationLogger':
        """
        Create a new logger carrying additional context fields.

        Args:
            **extra_fields: Additional context fields (merged over existing)

        Returns:
            New logger instance with merged context
        """
        merged_fields = {**self.extra_fields, **extra_fields}
        return IntegrationLogger(
            name=self.name,
            context=self.context,
            extra_fields=merged_fields
        )
# ========== Factory Functions ==========
def get_logger(
    name: str,
    context: Optional[Any] = None,
    **extra_fields
) -> IntegrationLogger:
    """
    Create a general-purpose IntegrationLogger.

    Args:
        name: Logger name
        context: Optional Motia FlowContext
        **extra_fields: Context fields appended to every message

    Returns:
        Configured logger

    Example:
        logger = get_logger('advoware.sync', context=ctx, entity_id='123')
        logger.info("Starting sync")
    """
    return IntegrationLogger(name, context, extra_fields)
def get_service_logger(
    service_name: str,
    context: Optional[Any] = None
) -> IntegrationLogger:
    """
    Create a logger namespaced under 'services.'.

    Args:
        service_name: Service name (e.g. 'advoware', 'espocrm')
        context: Optional FlowContext

    Returns:
        Service logger
    """
    qualified_name = f"services.{service_name}"
    return IntegrationLogger(qualified_name, context)
def get_step_logger(
    step_name: str,
    context: Optional[Any] = None
) -> IntegrationLogger:
    """
    Create a logger namespaced under 'steps.'.

    Args:
        step_name: Step name
        context: FlowContext (required for steps)

    Returns:
        Step logger
    """
    qualified_name = f"steps.{step_name}"
    return IntegrationLogger(qualified_name, context)
# ========== Decorator for Logging ==========
def log_operation(operation_name: str):
    """
    Decorator that wraps a call in logger.operation() so start, completion,
    duration and failures are logged automatically.

    The FlowContext is discovered by scanning positional args for the first
    object exposing a .logger attribute.

    Args:
        operation_name: Name used in the operation log lines

    Example:
        @log_operation('sync_beteiligte')
        async def sync_entity(entity_id: str):
            ...
    """
    import asyncio
    import functools

    def _find_context(args):
        # First positional arg with a .logger attribute is treated as ctx.
        for arg in args:
            if hasattr(arg, 'logger'):
                return arg
        return None

    def decorator(func):
        # functools.wraps preserves __name__/__doc__ of the decorated
        # function for introspection and debugging (was missing before).
        @functools.wraps(func)
        async def async_wrapper(*args, **kwargs):
            logger = get_logger(func.__module__, _find_context(args))
            with logger.operation(operation_name):
                return await func(*args, **kwargs)

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            logger = get_logger(func.__module__, _find_context(args))
            with logger.operation(operation_name):
                return func(*args, **kwargs)

        # Return the wrapper matching the decorated function's nature.
        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        return sync_wrapper
    return decorator
# ========== Performance Tracking ==========
class PerformanceTracker:
    """Collects per-operation duration samples and reports statistics."""

    def __init__(self, logger: IntegrationLogger):
        self.logger = logger
        # operation name -> list of duration samples in milliseconds
        self.metrics: Dict[str, list] = {}

    def record(self, operation: str, duration_ms: int) -> None:
        """Add one duration sample for *operation*."""
        self.metrics.setdefault(operation, []).append(duration_ms)

    def get_stats(self, operation: str) -> Dict[str, float]:
        """Return count/avg/min/max/total for *operation* ({} if never recorded)."""
        durations = self.metrics.get(operation)
        if durations is None:
            return {}
        total = sum(durations)
        return {
            'count': len(durations),
            'avg_ms': total / len(durations),
            'min_ms': min(durations),
            'max_ms': max(durations),
            'total_ms': total
        }

    def log_summary(self) -> None:
        """Log one summary line per recorded operation."""
        self.logger.info("=== Performance Summary ===")
        for operation in self.metrics:
            stats = self.get_stats(operation)
            self.logger.info(
                f"{operation}: {stats['count']} calls, "
                f"avg {stats['avg_ms']:.1f}ms, "
                f"min {stats['min_ms']:.1f}ms, "
                f"max {stats['max_ms']:.1f}ms"
            )

315
services/models.py Normal file
View File

@@ -0,0 +1,315 @@
"""
Pydantic Models für Datenvalidierung
Definiert strenge Schemas für:
- Advoware Entities
- EspoCRM Entities
- Sync Operations
"""
from pydantic import BaseModel, Field, field_validator, ConfigDict
from typing import Optional, Literal
from datetime import date, datetime
from enum import Enum
# ========== Enums ==========
class Rechtsform(str, Enum):
    """Legal forms (Rechtsformen) for Beteiligte.

    The empty string denotes a natural person (no legal form).
    """
    NATUERLICHE_PERSON = ""
    GMBH = "GmbH"
    AG = "AG"
    GMBH_CO_KG = "GmbH & Co. KG"
    KG = "KG"
    OHG = "OHG"
    EV = "e.V."
    EINZELUNTERNEHMEN = "Einzelunternehmen"
    FREIBERUFLER = "Freiberufler"
class SyncStatus(str, Enum):
    """Lifecycle states of the Advoware sync for EspoCRM Beteiligte."""
    PENDING_SYNC = "pending_sync"
    SYNCING = "syncing"
    CLEAN = "clean"
    FAILED = "failed"
    CONFLICT = "conflict"
    PERMANENTLY_FAILED = "permanently_failed"
class FileStatus(str, Enum):
    """Allowed values of the CDokumente.fileStatus field."""
    NEW = "new"
    CHANGED = "changed"
    SYNCED = "synced"

    def __str__(self) -> str:
        # Render as the raw value; a plain (str, Enum) would print 'FileStatus.NEW'.
        return self.value
class XAISyncStatus(str, Enum):
    """Allowed values of the CDokumente.xaiSyncStatus field."""
    NO_SYNC = "no_sync"            # entity has no xAI collections
    PENDING_SYNC = "pending_sync"  # sync in progress (locked)
    CLEAN = "clean"                # synced successfully
    UNCLEAN = "unclean"            # needs re-sync (file changed)
    FAILED = "failed"              # sync failed (see xaiSyncError)

    def __str__(self) -> str:
        # Render as the raw value for direct use in API payloads/logs.
        return self.value
class SalutationType(str, Enum):
    """Salutation options; the empty string marks a company (no salutation)."""
    HERR = "Herr"
    FRAU = "Frau"
    DIVERS = "Divers"
    FIRMA = ""
class AIKnowledgeActivationStatus(str, Enum):
    """Activation lifecycle of a CAIKnowledge collection in xAI."""
    NEW = "new"                  # collection not yet created in xAI
    ACTIVE = "active"            # collection active, sync running
    PAUSED = "paused"            # collection exists, but no sync
    DEACTIVATED = "deactivated"  # collection deleted from xAI

    def __str__(self) -> str:
        # Render as the raw value for direct use in API payloads/logs.
        return self.value
class AIKnowledgeSyncStatus(str, Enum):
    """Sync states of a CAIKnowledge entry."""
    UNCLEAN = "unclean"            # changes pending
    PENDING_SYNC = "pending_sync"  # sync running (locked)
    SYNCED = "synced"              # everything synced
    FAILED = "failed"              # sync failed

    def __str__(self) -> str:
        # Render as the raw value for direct use in API payloads/logs.
        return self.value
class JunctionSyncStatus(str, Enum):
    """Sync states stored on junction rows (CAIKnowledgeCDokumente)."""
    NEW = "new"
    UNCLEAN = "unclean"
    SYNCED = "synced"
    FAILED = "failed"
    UNSUPPORTED = "unsupported"

    def __str__(self) -> str:
        # Render as the raw value for direct use in API payloads/logs.
        return self.value
# ========== Advoware Models ==========
class AdvowareBeteiligteBase(BaseModel):
    """Base schema for Advoware Beteiligte payloads (POST/PUT).

    Field names mirror the Advoware API. ``rechtsform`` defaults to "",
    which denotes a natural person (see Rechtsform enum).
    """
    # Strip surrounding whitespace on all str fields; re-validate on assignment.
    model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True)
    name: str = Field(..., min_length=1, max_length=200)
    vorname: Optional[str] = Field(None, max_length=100)
    rechtsform: str = Field(default="")
    anrede: Optional[str] = Field(None, max_length=50)
    titel: Optional[str] = Field(None, max_length=50)
    bAnrede: Optional[str] = Field(None, max_length=200, description="Briefanrede")
    zusatz: Optional[str] = Field(None, max_length=200)
    geburtsdatum: Optional[date] = None

    @field_validator('name')
    @classmethod
    def validate_name(cls, v: str) -> str:
        """Reject empty/whitespace-only names; return the trimmed value."""
        if not v or not v.strip():
            raise ValueError('Name darf nicht leer sein')
        return v.strip()

    @field_validator('geburtsdatum')
    @classmethod
    def validate_birthdate(cls, v: Optional[date]) -> Optional[date]:
        """Require the birth date to be between 1900-01-01 and today."""
        if v and v > date.today():
            raise ValueError('Geburtsdatum kann nicht in der Zukunft liegen')
        if v and v.year < 1900:
            raise ValueError('Geburtsdatum vor 1900 nicht erlaubt')
        return v
class AdvowareBeteiligteRead(AdvowareBeteiligteBase):
    """Advoware Beteiligte as returned by GET responses."""
    betNr: int = Field(..., ge=1)  # Advoware participant number, 1-based
    rowId: str = Field(..., description="Change detection ID")
    # Address fields Advoware may include in responses.
    strasse: Optional[str] = None
    plz: Optional[str] = None
    ort: Optional[str] = None
    land: Optional[str] = None
class AdvowareBeteiligteCreate(AdvowareBeteiligteBase):
    """Advoware Beteiligte payload for POST (same shape as the base)."""
    pass
class AdvowareBeteiligteUpdate(AdvowareBeteiligteBase):
    """Advoware Beteiligte payload for PUT (same shape as the base)."""
    pass
# ========== EspoCRM Models ==========
class EspoCRMBeteiligteBase(BaseModel):
    """Base schema for EspoCRM CBeteiligte entities.

    Mixes person fields (firstName/lastName/dateOfBirth) with company
    fields (firmenname/rechtsform); which set applies depends on the
    Rechtsform — presumably enforced by the mapper, not here. TODO confirm.
    """
    # Strip surrounding whitespace on all str fields; re-validate on assignment.
    model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True)
    name: str = Field(..., min_length=1, max_length=255)
    firstName: Optional[str] = Field(None, max_length=100)
    lastName: Optional[str] = Field(None, max_length=100)
    firmenname: Optional[str] = Field(None, max_length=255)
    rechtsform: str = Field(default="")
    salutationName: Optional[str] = None
    titel: Optional[str] = Field(None, max_length=100)
    briefAnrede: Optional[str] = Field(None, max_length=255)
    zusatz: Optional[str] = Field(None, max_length=255)
    dateOfBirth: Optional[date] = None

    @field_validator('name')
    @classmethod
    def validate_name(cls, v: str) -> str:
        """Reject empty/whitespace-only names; return the trimmed value."""
        if not v or not v.strip():
            raise ValueError('Name darf nicht leer sein')
        return v.strip()

    @field_validator('dateOfBirth')
    @classmethod
    def validate_birthdate(cls, v: Optional[date]) -> Optional[date]:
        """Require the birth date to be between 1900-01-01 and today."""
        if v and v > date.today():
            raise ValueError('Geburtsdatum kann nicht in der Zukunft liegen')
        if v and v.year < 1900:
            raise ValueError('Geburtsdatum vor 1900 nicht erlaubt')
        return v

    @field_validator('firstName', 'lastName')
    @classmethod
    def validate_person_fields(cls, v: Optional[str]) -> Optional[str]:
        """Normalize person-name fields: trim, and map empty strings to None."""
        if v:
            return v.strip()
        return None
class EspoCRMBeteiligteRead(EspoCRMBeteiligteBase):
    """EspoCRM CBeteiligte response model (GET)."""
    id: str = Field(..., min_length=1)
    # Advoware linkage — presumably populated after a successful sync; verify against sync code
    betnr: Optional[int] = Field(None, ge=1)
    advowareRowId: Optional[str] = None
    # Sync bookkeeping (retry count is capped at 10)
    syncStatus: SyncStatus = Field(default=SyncStatus.PENDING_SYNC)
    syncRetryCount: int = Field(default=0, ge=0, le=10)
    syncErrorMessage: Optional[str] = None
    advowareLastSync: Optional[datetime] = None
    syncNextRetry: Optional[datetime] = None
    syncAutoResetAt: Optional[datetime] = None
class EspoCRMBeteiligteCreate(EspoCRMBeteiligteBase):
    """EspoCRM CBeteiligte payload for POST."""
    # New records start in PENDING_SYNC unless explicitly overridden
    syncStatus: SyncStatus = Field(default=SyncStatus.PENDING_SYNC)
class EspoCRMBeteiligteUpdate(BaseModel):
    """EspoCRM CBeteiligte payload for PUT (all fields optional)."""
    model_config = ConfigDict(str_strip_whitespace=True, validate_assignment=True)
    name: Optional[str] = Field(None, min_length=1, max_length=255)
    firstName: Optional[str] = Field(None, max_length=100)
    lastName: Optional[str] = Field(None, max_length=100)
    firmenname: Optional[str] = Field(None, max_length=255)
    rechtsform: Optional[str] = None
    salutationName: Optional[str] = None
    titel: Optional[str] = Field(None, max_length=100)
    briefAnrede: Optional[str] = Field(None, max_length=255)
    zusatz: Optional[str] = Field(None, max_length=255)
    dateOfBirth: Optional[date] = None
    betnr: Optional[int] = Field(None, ge=1)
    advowareRowId: Optional[str] = None
    syncStatus: Optional[SyncStatus] = None
    syncRetryCount: Optional[int] = Field(None, ge=0, le=10)
    syncErrorMessage: Optional[str] = Field(None, max_length=2000)
    advowareLastSync: Optional[datetime] = None
    syncNextRetry: Optional[datetime] = None

    def model_dump_clean(self) -> dict:
        """Return only the fields that are not None (PATCH-like update).

        NOTE(review): fields explicitly set to None are dropped as well, so a
        caller cannot clear a value through this helper.
        """
        return {k: v for k, v in self.model_dump().items() if v is not None}
# ========== Sync Operation Models ==========
class SyncOperation(BaseModel):
    """Tracking record for a single sync operation."""
    entity_id: str
    action: Literal["create", "update", "delete", "sync_check"]
    # Where the operation was triggered from
    source: Literal["webhook", "cron", "api", "manual"]
    # NOTE(review): datetime.utcnow is deprecated (Python 3.12+) and produces a
    # naive datetime; consider datetime.now(timezone.utc) if callers tolerate
    # timezone-aware values.
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    entity_type: str = "CBeteiligte"
class SyncResult(BaseModel):
    """Outcome of a single sync operation."""
    success: bool
    entity_id: str
    action: str
    # Human-readable success message (if any)
    message: Optional[str] = None
    # Error description when success is False
    error: Optional[str] = None
    details: Optional[dict] = None
    duration_ms: Optional[int] = None
# ========== Validation Helpers ==========
def validate_beteiligte_advoware(data: dict) -> AdvowareBeteiligteCreate:
    """
    Validate Advoware Beteiligte data.

    Args:
        data: Dict with raw Advoware data

    Returns:
        Validated AdvowareBeteiligteCreate model

    Raises:
        ValidationError: On validation failures (original pydantic error is
            preserved as __cause__ via exception chaining)
    """
    try:
        return AdvowareBeteiligteCreate.model_validate(data)
    except Exception as e:
        # Local import avoids a hard dependency at module load time
        from services.exceptions import ValidationError
        # Chain the original exception so the pydantic details survive in tracebacks
        raise ValidationError(f"Invalid Advoware data: {e}") from e
def validate_beteiligte_espocrm(data: dict) -> EspoCRMBeteiligteCreate:
    """
    Validate EspoCRM Beteiligte data.

    Args:
        data: Dict with raw EspoCRM data

    Returns:
        Validated EspoCRMBeteiligteCreate model

    Raises:
        ValidationError: On validation failures (original pydantic error is
            preserved as __cause__ via exception chaining)
    """
    try:
        return EspoCRMBeteiligteCreate.model_validate(data)
    except Exception as e:
        # Local import avoids a hard dependency at module load time
        from services.exceptions import ValidationError
        # Chain the original exception so the pydantic details survive in tracebacks
        raise ValidationError(f"Invalid EspoCRM data: {e}") from e

202
services/redis_client.py Normal file
View File

@@ -0,0 +1,202 @@
"""
Redis Client Factory
Centralized Redis client management with:
- Singleton pattern
- Connection pooling
- Automatic reconnection
- Health checks
"""
import redis
import os
from typing import Optional
from services.exceptions import RedisConnectionError
from services.logging_utils import get_service_logger
class RedisClientFactory:
    """
    Singleton factory for Redis clients.

    Benefits:
    - Centralized configuration
    - Connection pooling
    - Lazy initialization
    - Better error handling
    """
    # Class-level singleton state shared by every caller.
    # NOTE(review): first-time creation is not guarded by a lock; concurrent
    # first calls could race and create two clients — confirm callers are
    # effectively single-threaded at startup.
    _instance: Optional[redis.Redis] = None
    _connection_pool: Optional[redis.ConnectionPool] = None
    _logger = None

    @classmethod
    def _get_logger(cls):
        """Get logger instance (lazy initialization)."""
        if cls._logger is None:
            cls._logger = get_service_logger('redis_factory', None)
        return cls._logger

    @classmethod
    def get_client(cls, strict: bool = False) -> Optional[redis.Redis]:
        """
        Return Redis client (creates if needed).

        Args:
            strict: If True, raises exception on connection failures.
                    If False, returns None (for optional Redis usage).

        Returns:
            Redis client or None (if strict=False and connection fails)

        Raises:
            RedisConnectionError: If strict=True and connection fails
        """
        logger = cls._get_logger()
        if cls._instance is None:
            try:
                cls._instance = cls._create_client()
                logger.info("Redis client created successfully")
            except Exception as e:
                logger.error(f"Failed to create Redis client: {e}")
                if strict:
                    raise RedisConnectionError(
                        f"Could not connect to Redis: {e}",
                        operation="get_client"
                    )
                # Non-strict mode degrades gracefully: caller must handle None
                logger.warning("Redis unavailable - continuing without caching")
                return None
        return cls._instance

    @classmethod
    def _create_client(cls) -> redis.Redis:
        """
        Create new Redis client with connection pool.

        Returns:
            Configured Redis client

        Raises:
            redis.ConnectionError: On connection problems
        """
        logger = cls._get_logger()
        # Load configuration from environment
        redis_host = os.getenv('REDIS_HOST', 'localhost')
        redis_port = int(os.getenv('REDIS_PORT', '6379'))
        redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
        redis_timeout = int(os.getenv('REDIS_TIMEOUT_SECONDS', '5'))
        redis_max_connections = int(os.getenv('REDIS_MAX_CONNECTIONS', '50'))
        logger.info(
            f"Creating Redis client: {redis_host}:{redis_port} "
            f"(db={redis_db}, timeout={redis_timeout}s)"
        )
        # Create connection pool (reused across retries if a previous
        # client creation failed after the pool was built)
        if cls._connection_pool is None:
            cls._connection_pool = redis.ConnectionPool(
                host=redis_host,
                port=redis_port,
                db=redis_db,
                socket_timeout=redis_timeout,
                socket_connect_timeout=redis_timeout,
                max_connections=redis_max_connections,
                decode_responses=True  # Auto-decode bytes to strings
            )
        # Create client from pool
        client = redis.Redis(connection_pool=cls._connection_pool)
        # Verify connection before handing the client out
        client.ping()
        return client

    @classmethod
    def reset(cls) -> None:
        """
        Reset factory state (mainly for tests).

        Closes existing connections and resets singleton.
        """
        logger = cls._get_logger()
        if cls._instance:
            try:
                cls._instance.close()
            except Exception as e:
                logger.warning(f"Error closing Redis client: {e}")
        if cls._connection_pool:
            try:
                cls._connection_pool.disconnect()
            except Exception as e:
                logger.warning(f"Error closing connection pool: {e}")
        cls._instance = None
        cls._connection_pool = None
        logger.info("Redis factory reset")

    @classmethod
    def health_check(cls) -> bool:
        """
        Check Redis connection.

        Returns:
            True if Redis is reachable, False otherwise
        """
        logger = cls._get_logger()
        try:
            client = cls.get_client(strict=False)
            if client is None:
                return False
            client.ping()
            return True
        except Exception as e:
            logger.warning(f"Redis health check failed: {e}")
            return False

    @classmethod
    def get_info(cls) -> Optional[dict]:
        """
        Return Redis server info (for monitoring).

        Returns:
            Redis info dict or None on error
        """
        logger = cls._get_logger()
        try:
            client = cls.get_client(strict=False)
            if client is None:
                return None
            return client.info()
        except Exception as e:
            logger.error(f"Failed to get Redis info: {e}")
            return None
# ========== Convenience Functions ==========
def get_redis_client(strict: bool = False) -> Optional[redis.Redis]:
    """
    Convenience wrapper around RedisClientFactory.get_client.

    Args:
        strict: If True, connection failures raise an exception instead of
                returning None

    Returns:
        Shared Redis client, or None when unavailable and strict is False
    """
    client = RedisClientFactory.get_client(strict=strict)
    return client
def is_redis_available() -> bool:
    """
    Report whether Redis is currently reachable.

    Returns:
        True if a ping against the shared client succeeds
    """
    reachable = RedisClientFactory.health_check()
    return reachable

View File

@@ -9,62 +9,43 @@ Gemeinsame Funktionalität für alle Sync-Operationen:
from typing import Dict, Any, Optional from typing import Dict, Any, Optional
from datetime import datetime from datetime import datetime
import logging
import redis
import os
import pytz import pytz
logger = logging.getLogger(__name__) from services.exceptions import RedisConnectionError, LockAcquisitionError
from services.redis_client import get_redis_client
from services.config import SYNC_CONFIG, get_lock_key
from services.logging_utils import get_service_logger
# Lock TTL in seconds (prevents deadlocks) import redis
LOCK_TTL_SECONDS = 900 # 15 minutes
class BaseSyncUtils: class BaseSyncUtils:
"""Base-Klasse mit gemeinsamer Sync-Funktionalität""" """Base-Klasse mit gemeinsamer Sync-Funktionalität"""
def __init__(self, espocrm_api, redis_client: redis.Redis = None, context=None): def __init__(self, espocrm_api, redis_client: Optional[redis.Redis] = None, context=None):
""" """
Args: Args:
espocrm_api: EspoCRM API client instance espocrm_api: EspoCRM API client instance
redis_client: Optional Redis client (wird sonst initialisiert) redis_client: Optional Redis client (wird sonst über Factory initialisiert)
context: Optional Motia FlowContext für Logging context: Optional Motia FlowContext für Logging
""" """
self.espocrm = espocrm_api self.espocrm = espocrm_api
self.context = context self.context = context
self.logger = context.logger if context else logger self.logger = get_service_logger('sync_utils', context)
self.redis = redis_client or self._init_redis()
def _init_redis(self) -> Optional[redis.Redis]: # Use provided Redis client or get from factory
"""Initialize Redis client for distributed locking""" self.redis = redis_client or get_redis_client(strict=False)
try:
redis_host = os.getenv('REDIS_HOST', 'localhost')
redis_port = int(os.getenv('REDIS_PORT', '6379'))
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
client = redis.Redis( if not self.redis:
host=redis_host, self.logger.error(
port=redis_port, "⚠️ WARNUNG: Redis nicht verfügbar! "
db=redis_db, "Distributed Locking deaktiviert - Race Conditions möglich!"
decode_responses=True
) )
client.ping()
return client
except Exception as e:
self._log(f"Redis connection failed: {e}", level='error')
return None
def _log(self, message: str, level: str = 'info'): def _log(self, message: str, level: str = 'info') -> None:
""" """Delegate logging to the logger with optional level"""
Context-aware logging log_func = getattr(self.logger, level, self.logger.info)
log_func(message)
Falls ein FlowContext vorhanden ist, wird dessen Logger verwendet.
Sonst fallback auf Standard-Logger.
"""
if self.context and hasattr(self.context, 'logger'):
getattr(self.context.logger, level)(message)
else:
getattr(logger, level)(message)
def _get_lock_key(self, entity_id: str) -> str: def _get_lock_key(self, entity_id: str) -> str:
""" """
@@ -84,17 +65,30 @@ class BaseSyncUtils:
Returns: Returns:
True wenn Lock erfolgreich, False wenn bereits locked True wenn Lock erfolgreich, False wenn bereits locked
Raises:
LockAcquisitionError: Bei kritischen Lock-Problemen (wenn strict mode)
""" """
if not self.redis: if not self.redis:
self._log("Redis nicht verfügbar, Lock-Mechanismus deaktiviert", level='warn') self.logger.error(
return True # Fallback: Wenn kein Redis, immer lock erlauben "CRITICAL: Distributed Locking deaktiviert - Redis nicht verfügbar!"
)
# In production: Dies könnte zu Race Conditions führen!
# Für jetzt erlauben wir Fortsetzung, aber mit Warning
return True
try: try:
acquired = self.redis.set(lock_key, "locked", nx=True, ex=LOCK_TTL_SECONDS) acquired = self.redis.set(
lock_key,
"locked",
nx=True,
ex=SYNC_CONFIG.lock_ttl_seconds
)
return bool(acquired) return bool(acquired)
except Exception as e: except redis.RedisError as e:
self._log(f"Redis lock error: {e}", level='error') self.logger.error(f"Redis lock error: {e}")
return True # Bei Fehler: Lock erlauben, um Deadlocks zu vermeiden # Bei Redis-Fehler: Lock erlauben, um Deadlocks zu vermeiden
return True
def _release_redis_lock(self, lock_key: str) -> None: def _release_redis_lock(self, lock_key: str) -> None:
""" """
@@ -108,8 +102,8 @@ class BaseSyncUtils:
try: try:
self.redis.delete(lock_key) self.redis.delete(lock_key)
except Exception as e: except redis.RedisError as e:
self._log(f"Redis unlock error: {e}", level='error') self.logger.error(f"Redis unlock error: {e}")
def _get_espocrm_datetime(self, dt: Optional[datetime] = None) -> str: def _get_espocrm_datetime(self, dt: Optional[datetime] = None) -> str:
""" """

529
services/xai_service.py Normal file
View File

@@ -0,0 +1,529 @@
"""xAI Files & Collections Service"""
import os
import asyncio
import aiohttp
from typing import Optional, List, Dict, Tuple
from services.logging_utils import get_service_logger
XAI_FILES_URL = "https://api.x.ai"
XAI_MANAGEMENT_URL = "https://management-api.x.ai"
class XAIService:
    """
    Client for the xAI Files API and the Collections Management API.

    Required environment variables:
        - XAI_API_KEY        regular API key for file uploads (api.x.ai)
        - XAI_MANAGEMENT_KEY management API key for collection operations
                             (management-api.x.ai)
    """

    def __init__(self, ctx=None):
        """
        Args:
            ctx: Optional flow context used for context-aware logging

        Raises:
            ValueError: If either required API key is missing from the environment
        """
        self.api_key = os.getenv('XAI_API_KEY', '')
        self.management_key = os.getenv('XAI_MANAGEMENT_KEY', '')
        self.ctx = ctx
        self.logger = get_service_logger('xai', ctx)
        self._session: Optional[aiohttp.ClientSession] = None
        if not self.api_key:
            raise ValueError("XAI_API_KEY not configured in environment")
        if not self.management_key:
            raise ValueError("XAI_MANAGEMENT_KEY not configured in environment")

    def _log(self, msg: str, level: str = 'info') -> None:
        """Delegate logging to the service logger; unknown levels fall back to info."""
        log_func = getattr(self.logger, level, self.logger.info)
        log_func(msg)

    async def _get_session(self) -> aiohttp.ClientSession:
        """Return the shared aiohttp session, creating a new one if closed/missing."""
        if self._session is None or self._session.closed:
            self._session = aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=120)
            )
        return self._session

    async def close(self) -> None:
        """Close the underlying aiohttp session (safe to call multiple times)."""
        if self._session and not self._session.closed:
            await self._session.close()

    async def upload_file(
        self,
        file_content: bytes,
        filename: str,
        mime_type: str = 'application/octet-stream'
    ) -> str:
        """
        Upload a file to the xAI Files API (multipart/form-data).

        POST https://api.x.ai/v1/files

        Args:
            file_content: Raw file bytes
            filename: Original filename (UTF-8, sent unencoded)
            mime_type: Content type of the file

        Returns:
            xAI file_id (str)

        Raises:
            RuntimeError: On HTTP errors or a missing file_id in the response
        """
        # Normalize MIME type: xAI needs correct Content-Type for proper processing.
        # If generic octet-stream but the file is clearly a PDF, fix it.
        if mime_type == 'application/octet-stream' and filename.lower().endswith('.pdf'):
            mime_type = 'application/pdf'
            # BUGFIX: f-string previously contained no placeholder; log the actual filename
            self._log(f"⚠️ Corrected MIME type to application/pdf for {filename}")
        # BUGFIX: interpolate the filename instead of a stray literal
        self._log(f"📤 Uploading {len(file_content)} bytes to xAI: {filename} ({mime_type})")
        session = await self._get_session()
        url = f"{XAI_FILES_URL}/v1/files"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        # Create multipart form with explicit UTF-8 filename encoding.
        # aiohttp automatically URL-encodes filenames with special chars,
        # but xAI expects raw UTF-8 in the filename parameter.
        form = aiohttp.FormData(quote_fields=False)
        form.add_field(
            'file',
            file_content,
            filename=filename,
            content_type=mime_type
        )
        # CRITICAL: purpose="file_search" enables proper PDF processing.
        # Without this, xAI throws "internal error" on complex PDFs.
        form.add_field('purpose', 'file_search')
        async with session.post(url, data=form, headers=headers) as response:
            try:
                data = await response.json()
            except Exception:
                # Non-JSON error bodies are preserved for the error message
                raw = await response.text()
                data = {"_raw": raw}
            if response.status not in (200, 201):
                raise RuntimeError(
                    f"xAI file upload failed ({response.status}): {data}"
                )
            file_id = data.get('id') or data.get('file_id')
            if not file_id:
                raise RuntimeError(
                    f"No file_id in xAI upload response: {data}"
                )
            self._log(f"✅ xAI file uploaded: {file_id}")
            return file_id

    async def add_to_collection(self, collection_id: str, file_id: str) -> None:
        """
        Add a file to an xAI collection (vector store).

        POST https://api.x.ai/v1/vector_stores/{vector_store_id}/files

        Uses the OpenAI-compatible API pattern for adding files to vector stores.
        This triggers proper indexing and processing.

        Raises:
            RuntimeError: On HTTP errors
        """
        self._log(f"📚 Adding file {file_id} to collection {collection_id}")
        session = await self._get_session()
        # Use the OpenAI-compatible endpoint (not the management API)
        url = f"{XAI_FILES_URL}/v1/vector_stores/{collection_id}/files"
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = {"file_id": file_id}
        async with session.post(url, json=payload, headers=headers) as response:
            if response.status not in (200, 201):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to add file to collection {collection_id} ({response.status}): {raw}"
                )
        self._log(f"✅ File {file_id} added to collection {collection_id}")

    async def remove_from_collection(self, collection_id: str, file_id: str) -> None:
        """
        Remove a file from an xAI collection.

        The file itself is NOT deleted — it may still belong to other collections.

        DELETE https://management-api.x.ai/v1/collections/{collection_id}/documents/{file_id}

        Raises:
            RuntimeError: On HTTP errors
        """
        self._log(f"🗑️ Removing file {file_id} from collection {collection_id}")
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections/{collection_id}/documents/{file_id}"
        headers = {"Authorization": f"Bearer {self.management_key}"}
        async with session.delete(url, headers=headers) as response:
            if response.status not in (200, 204):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to remove file from collection {collection_id} ({response.status}): {raw}"
                )
        self._log(f"✅ File {file_id} removed from collection {collection_id}")

    async def add_to_collections(self, collection_ids: List[str], file_id: str) -> List[str]:
        """
        Add a file to multiple collections, continuing on per-collection errors.

        Returns:
            List of collection IDs the file was successfully added to
        """
        added = []
        for collection_id in collection_ids:
            try:
                await self.add_to_collection(collection_id, file_id)
                added.append(collection_id)
            except Exception as e:
                self._log(
                    f"⚠️ Fehler beim Hinzufügen zu Collection {collection_id}: {e}",
                    level='warn'
                )
        return added

    async def remove_from_collections(self, collection_ids: List[str], file_id: str) -> None:
        """Remove a file from multiple collections (per-collection errors are logged, not raised)."""
        for collection_id in collection_ids:
            try:
                await self.remove_from_collection(collection_id, file_id)
            except Exception as e:
                self._log(
                    f"⚠️ Fehler beim Entfernen aus Collection {collection_id}: {e}",
                    level='warn'
                )

    # ========== Collection Management ==========
    async def create_collection(
        self,
        name: str,
        metadata: Optional[Dict[str, str]] = None,
        field_definitions: Optional[List[Dict]] = None
    ) -> Dict:
        """
        Create a new xAI collection.

        POST https://management-api.x.ai/v1/collections

        Args:
            name: Collection name
            metadata: Optional metadata dict
            field_definitions: Optional field definitions for metadata fields

        Returns:
            Collection object; the ID is in 'collection_id' (or legacy 'id')

        Raises:
            RuntimeError: On HTTP errors
        """
        self._log(f"📚 Creating collection: {name}")
        # Default field definitions for document metadata
        if field_definitions is None:
            field_definitions = [
                {"key": "document_name", "inject_into_chunk": True},
                {"key": "description", "inject_into_chunk": True},
                {"key": "created_at", "inject_into_chunk": False},
                {"key": "modified_at", "inject_into_chunk": False},
                {"key": "espocrm_id", "inject_into_chunk": False}
            ]
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections"
        headers = {
            "Authorization": f"Bearer {self.management_key}",
            "Content-Type": "application/json"
        }
        body = {
            "collection_name": name,
            "field_definitions": field_definitions
        }
        # Add metadata if provided
        if metadata:
            body["metadata"] = metadata
        async with session.post(url, json=body, headers=headers) as response:
            if response.status not in (200, 201):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to create collection ({response.status}): {raw}"
                )
            data = await response.json()
            # API returns 'collection_id', not 'id'
            collection_id = data.get('collection_id') or data.get('id')
            self._log(f"✅ Collection created: {collection_id}")
            return data

    async def get_collection(self, collection_id: str) -> Optional[Dict]:
        """
        Fetch collection details.

        GET https://management-api.x.ai/v1/collections/{collection_id}

        Returns:
            Collection object, or None if not found (404)

        Raises:
            RuntimeError: On HTTP errors other than 404
        """
        self._log(f"📄 Getting collection: {collection_id}")
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections/{collection_id}"
        headers = {"Authorization": f"Bearer {self.management_key}"}
        async with session.get(url, headers=headers) as response:
            if response.status == 404:
                self._log(f"⚠️ Collection not found: {collection_id}", level='warn')
                return None
            if response.status not in (200,):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to get collection ({response.status}): {raw}"
                )
            data = await response.json()
            self._log(f"✅ Collection retrieved: {data.get('collection_name', 'N/A')}")
            return data

    async def delete_collection(self, collection_id: str) -> None:
        """
        Delete an xAI collection.

        DELETE https://management-api.x.ai/v1/collections/{collection_id}

        NOTE: Documents inside the collection are NOT deleted!
        They may still belong to other collections.

        Raises:
            RuntimeError: On HTTP errors
        """
        self._log(f"🗑️ Deleting collection {collection_id}")
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections/{collection_id}"
        headers = {"Authorization": f"Bearer {self.management_key}"}
        async with session.delete(url, headers=headers) as response:
            if response.status not in (200, 204):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to delete collection {collection_id} ({response.status}): {raw}"
                )
        self._log(f"✅ Collection deleted: {collection_id}")

    async def list_collection_documents(self, collection_id: str) -> List[Dict]:
        """
        List all documents in a collection.

        GET https://management-api.x.ai/v1/collections/{collection_id}/documents

        Returns:
            List of normalized document objects:
            [
                {
                    'file_id': 'file_...',
                    'filename': 'doc.pdf',
                    'blake3_hash': 'hex_string',  # Plain hex, no prefix
                    'size_bytes': 12345,
                    'content_type': 'application/pdf',
                    'fields': {},  # Custom metadata
                    'status': 'DOCUMENT_STATUS_...'
                }
            ]

        Raises:
            RuntimeError: On HTTP errors
        """
        self._log(f"📋 Listing documents in collection {collection_id}")
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections/{collection_id}/documents"
        headers = {"Authorization": f"Bearer {self.management_key}"}
        async with session.get(url, headers=headers) as response:
            if response.status not in (200,):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to list documents ({response.status}): {raw}"
                )
            data = await response.json()
            # API returns either a bare list or a dict with a 'documents' key
            if isinstance(data, list):
                raw_documents = data
            elif isinstance(data, dict) and 'documents' in data:
                raw_documents = data['documents']
            else:
                raw_documents = []
            # Normalize nested structure: file_metadata -> top level
            normalized = []
            for doc in raw_documents:
                file_meta = doc.get('file_metadata', {})
                normalized.append({
                    'file_id': file_meta.get('file_id'),
                    'filename': file_meta.get('name'),
                    'blake3_hash': file_meta.get('hash'),  # Plain hex string
                    'size_bytes': int(file_meta.get('size_bytes', 0)) if file_meta.get('size_bytes') else 0,
                    'content_type': file_meta.get('content_type'),
                    'created_at': file_meta.get('created_at'),
                    'fields': doc.get('fields', {}),
                    'status': doc.get('status')
                })
            self._log(f"✅ Listed {len(normalized)} documents")
            return normalized

    async def get_collection_document(self, collection_id: str, file_id: str) -> Optional[Dict]:
        """
        Fetch document details from an xAI collection.

        GET https://management-api.x.ai/v1/collections/{collection_id}/documents/{file_id}

        Returns:
            Normalized dict with document info:
            {
                'file_id': 'file_xyz',
                'filename': 'document.pdf',
                'blake3_hash': 'hex_string',  # Plain hex, no prefix
                'size_bytes': 12345,
                'content_type': 'application/pdf',
                'fields': {...}  # Custom metadata
            }
            Returns None if not found.

        Raises:
            RuntimeError: On HTTP errors other than 404
        """
        self._log(f"📄 Getting document {file_id} from collection {collection_id}")
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections/{collection_id}/documents/{file_id}"
        headers = {"Authorization": f"Bearer {self.management_key}"}
        async with session.get(url, headers=headers) as response:
            if response.status == 404:
                return None
            if response.status not in (200,):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to get document from collection ({response.status}): {raw}"
                )
            data = await response.json()
            # Normalize nested structure
            file_meta = data.get('file_metadata', {})
            normalized = {
                'file_id': file_meta.get('file_id'),
                'filename': file_meta.get('name'),
                'blake3_hash': file_meta.get('hash'),  # Plain hex
                'size_bytes': int(file_meta.get('size_bytes', 0)) if file_meta.get('size_bytes') else 0,
                'content_type': file_meta.get('content_type'),
                'created_at': file_meta.get('created_at'),
                'fields': data.get('fields', {}),
                'status': data.get('status')
            }
            self._log(f"✅ Document info retrieved: {normalized.get('filename', 'N/A')}")
            return normalized

    async def update_document_metadata(
        self,
        collection_id: str,
        file_id: str,
        metadata: Dict[str, str]
    ) -> None:
        """
        Update only a document's metadata (no file upload).

        PATCH https://management-api.x.ai/v1/collections/{collection_id}/documents/{file_id}

        Args:
            collection_id: xAI collection ID
            file_id: xAI file_id
            metadata: Updated metadata fields

        Raises:
            RuntimeError: On HTTP errors
        """
        self._log(f"📝 Updating metadata for document {file_id}")
        session = await self._get_session()
        url = f"{XAI_MANAGEMENT_URL}/v1/collections/{collection_id}/documents/{file_id}"
        headers = {
            "Authorization": f"Bearer {self.management_key}",
            "Content-Type": "application/json"
        }
        body = {"fields": metadata}
        async with session.patch(url, json=body, headers=headers) as response:
            if response.status not in (200, 204):
                raw = await response.text()
                raise RuntimeError(
                    f"Failed to update document metadata ({response.status}): {raw}"
                )
        self._log(f"✅ Metadata updated for {file_id}")

    def is_mime_type_supported(self, mime_type: str) -> bool:
        """
        Check whether xAI supports this MIME type.

        Args:
            mime_type: MIME type string

        Returns:
            True if supported, False otherwise
        """
        # Supported MIME types per xAI documentation
        supported_types = {
            # Documents
            'application/pdf',
            'application/msword',
            'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
            'application/vnd.ms-excel',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
            'application/vnd.oasis.opendocument.text',
            'application/epub+zip',
            'application/vnd.openxmlformats-officedocument.presentationml.presentation',
            # Text
            'text/plain',
            'text/html',
            'text/markdown',
            'text/csv',
            'text/xml',
            # Code
            'text/javascript',
            'application/json',
            'application/xml',
            'text/x-python',
            'text/x-java-source',
            'text/x-c',
            'text/x-c++src',
            # Other
            'application/zip',
        }
        # Normalize MIME type (lowercase, strip whitespace)
        normalized = mime_type.lower().strip()
        return normalized in supported_types

View File

@@ -17,7 +17,7 @@ from calendar_sync_utils import (
import math import math
import time import time
from datetime import datetime from datetime import datetime
from typing import Any from typing import Any, Dict
from motia import queue, FlowContext from motia import queue, FlowContext
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from services.advoware_service import AdvowareService from services.advoware_service import AdvowareService
@@ -33,7 +33,7 @@ config = {
} }
async def handler(input_data: dict, ctx: FlowContext): async def handler(input_data: Dict[str, Any], ctx: FlowContext) -> None:
""" """
Handler that fetches all employees, sorts by last sync time, Handler that fetches all employees, sorts by last sync time,
and emits calendar_sync_employee events for the oldest ones. and emits calendar_sync_employee events for the oldest ones.

View File

@@ -7,7 +7,7 @@ Supports syncing a single employee or all employees.
import sys import sys
from pathlib import Path from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent)) sys.path.insert(0, str(Path(__file__).parent))
from calendar_sync_utils import get_redis_client, set_employee_lock, log_operation from calendar_sync_utils import get_redis_client, set_employee_lock, get_logger
from motia import http, ApiRequest, ApiResponse, FlowContext from motia import http, ApiRequest, ApiResponse, FlowContext
@@ -41,7 +41,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
status=400, status=400,
body={ body={
'error': 'kuerzel required', 'error': 'kuerzel required',
'message': 'Bitte kuerzel im Body angeben' 'message': 'Please provide kuerzel in body'
} }
) )
@@ -49,7 +49,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
if kuerzel_upper == 'ALL': if kuerzel_upper == 'ALL':
# Emit sync-all event # Emit sync-all event
log_operation('info', "Calendar Sync API: Emitting sync-all event", context=ctx) ctx.logger.info("Calendar Sync API: Emitting sync-all event")
await ctx.enqueue({ await ctx.enqueue({
"topic": "calendar_sync_all", "topic": "calendar_sync_all",
"data": { "data": {
@@ -60,7 +60,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
status=200, status=200,
body={ body={
'status': 'triggered', 'status': 'triggered',
'message': 'Calendar sync wurde für alle Mitarbeiter ausgelöst', 'message': 'Calendar sync triggered for all employees',
'triggered_by': 'api' 'triggered_by': 'api'
} }
) )
@@ -69,7 +69,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
redis_client = get_redis_client(ctx) redis_client = get_redis_client(ctx)
if not set_employee_lock(redis_client, kuerzel_upper, 'api', ctx): if not set_employee_lock(redis_client, kuerzel_upper, 'api', ctx):
log_operation('info', f"Calendar Sync API: Sync already active for {kuerzel_upper}, skipping", context=ctx) ctx.logger.info(f"Calendar Sync API: Sync already active for {kuerzel_upper}, skipping")
return ApiResponse( return ApiResponse(
status=409, status=409,
body={ body={
@@ -80,7 +80,7 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
} }
) )
log_operation('info', f"Calendar Sync API called for {kuerzel_upper}", context=ctx) ctx.logger.info(f"Calendar Sync API called for {kuerzel_upper}")
# Lock successfully set, now emit event # Lock successfully set, now emit event
await ctx.enqueue({ await ctx.enqueue({
@@ -95,14 +95,14 @@ async def handler(request: ApiRequest, ctx: FlowContext) -> ApiResponse:
status=200, status=200,
body={ body={
'status': 'triggered', 'status': 'triggered',
'message': f'Calendar sync was triggered for {kuerzel_upper}', 'message': f'Calendar sync triggered for {kuerzel_upper}',
'kuerzel': kuerzel_upper, 'kuerzel': kuerzel_upper,
'triggered_by': 'api' 'triggered_by': 'api'
} }
) )
except Exception as e: except Exception as e:
log_operation('error', f"Error in API trigger: {e}", context=ctx) ctx.logger.error(f"Error in API trigger: {e}")
return ApiResponse( return ApiResponse(
status=500, status=500,
body={ body={

View File

@@ -9,6 +9,7 @@ from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent)) sys.path.insert(0, str(Path(__file__).parent))
from calendar_sync_utils import log_operation from calendar_sync_utils import log_operation
from typing import Dict, Any
from motia import cron, FlowContext from motia import cron, FlowContext
@@ -17,16 +18,19 @@ config = {
'description': 'Runs calendar sync automatically every 15 minutes', 'description': 'Runs calendar sync automatically every 15 minutes',
'flows': ['advoware-calendar-sync'], 'flows': ['advoware-calendar-sync'],
'triggers': [ 'triggers': [
cron("0 */15 1o * * *") # Every 15 minutes at second 0 (6-field: sec min hour day month weekday) cron("0 15 1 * * *") # Every 15 minutes at second 0 (6-field: sec min hour day month weekday)
], ],
'enqueues': ['calendar_sync_all'] 'enqueues': ['calendar_sync_all']
} }
async def handler(input_data: dict, ctx: FlowContext): async def handler(input_data: None, ctx: FlowContext) -> None:
"""Cron handler that triggers the calendar sync cascade.""" """Cron handler that triggers the calendar sync cascade."""
try: try:
log_operation('info', "Calendar Sync Cron: Starting to emit sync-all event", context=ctx) ctx.logger.info("=" * 80)
ctx.logger.info("🕐 CALENDAR SYNC CRON: STARTING")
ctx.logger.info("=" * 80)
ctx.logger.info("Emitting sync-all event")
# Enqueue sync-all event # Enqueue sync-all event
await ctx.enqueue({ await ctx.enqueue({
@@ -36,15 +40,11 @@ async def handler(input_data: dict, ctx: FlowContext):
} }
}) })
log_operation('info', "Calendar Sync Cron: Emitted sync-all event", context=ctx) ctx.logger.info("Calendar sync-all event emitted successfully")
return { ctx.logger.info("=" * 80)
'status': 'completed',
'triggered_by': 'cron'
}
except Exception as e: except Exception as e:
log_operation('error', f"Fehler beim Cron-Job: {e}", context=ctx) ctx.logger.error("=" * 80)
return { ctx.logger.error("❌ ERROR: CALENDAR SYNC CRON")
'status': 'error', ctx.logger.error(f"Error: {e}")
'error': str(e) ctx.logger.error("=" * 80)
}

View File

@@ -14,6 +14,7 @@ import asyncio
import os import os
import datetime import datetime
from datetime import timedelta from datetime import timedelta
from typing import Dict, Any
import pytz import pytz
import backoff import backoff
import time import time
@@ -64,7 +65,8 @@ async def enforce_global_rate_limit(context=None):
socket_timeout=int(os.getenv('REDIS_TIMEOUT_SECONDS', '5')) socket_timeout=int(os.getenv('REDIS_TIMEOUT_SECONDS', '5'))
) )
lua_script = """ try:
lua_script = """
local key = KEYS[1] local key = KEYS[1]
local current_time_ms = tonumber(ARGV[1]) local current_time_ms = tonumber(ARGV[1])
local max_tokens = tonumber(ARGV[2]) local max_tokens = tonumber(ARGV[2])
@@ -96,7 +98,6 @@ async def enforce_global_rate_limit(context=None):
end end
""" """
try:
script = redis_client.register_script(lua_script) script = redis_client.register_script(lua_script)
while True: while True:
@@ -120,6 +121,12 @@ async def enforce_global_rate_limit(context=None):
except Exception as e: except Exception as e:
log_operation('error', f"Rate limiting failed: {e}. Proceeding without limit.", context=context) log_operation('error', f"Rate limiting failed: {e}. Proceeding without limit.", context=context)
finally:
# Always close Redis connection to prevent resource leaks
try:
redis_client.close()
except Exception:
pass
@backoff.on_exception(backoff.expo, HttpError, max_tries=4, base=3, @backoff.on_exception(backoff.expo, HttpError, max_tries=4, base=3,
@@ -945,18 +952,19 @@ config = {
} }
async def handler(input_data: dict, ctx: FlowContext): async def handler(input_data: Dict[str, Any], ctx: FlowContext) -> None:
"""Main event handler for calendar sync.""" """Main event handler for calendar sync."""
start_time = time.time() start_time = time.time()
kuerzel = input_data.get('kuerzel') kuerzel = input_data.get('kuerzel')
if not kuerzel: if not kuerzel:
log_operation('error', "No kuerzel provided in event", context=ctx) log_operation('error', "No kuerzel provided in event", context=ctx)
return {'status': 400, 'body': {'error': 'No kuerzel provided'}} return
log_operation('info', f"Starting calendar sync for employee {kuerzel}", context=ctx) log_operation('info', f"Starting calendar sync for employee {kuerzel}", context=ctx)
redis_client = get_redis_client(ctx) redis_client = get_redis_client(ctx)
service = None
try: try:
log_operation('debug', "Initializing Advoware service", context=ctx) log_operation('debug', "Initializing Advoware service", context=ctx)
@@ -1052,6 +1060,19 @@ async def handler(input_data: dict, ctx: FlowContext):
log_operation('error', f"Sync failed for {kuerzel}: {e}", context=ctx) log_operation('error', f"Sync failed for {kuerzel}: {e}", context=ctx)
log_operation('info', f"Handler duration (failed): {time.time() - start_time}", context=ctx) log_operation('info', f"Handler duration (failed): {time.time() - start_time}", context=ctx)
return {'status': 500, 'body': {'error': str(e)}} return {'status': 500, 'body': {'error': str(e)}}
finally: finally:
# Always close resources to prevent memory leaks
if service is not None:
try:
service.close()
except Exception as e:
log_operation('debug', f"Error closing Google service: {e}", context=ctx)
try:
redis_client.close()
except Exception as e:
log_operation('debug', f"Error closing Redis client: {e}", context=ctx)
# Ensure lock is always released # Ensure lock is always released
clear_employee_lock(redis_client, kuerzel, ctx) clear_employee_lock(redis_client, kuerzel, ctx)

View File

@@ -3,50 +3,44 @@ Calendar Sync Utilities
Shared utility functions for calendar synchronization between Google Calendar and Advoware. Shared utility functions for calendar synchronization between Google Calendar and Advoware.
""" """
import logging
import asyncpg import asyncpg
import os import os
import redis import redis
import time import time
from typing import Optional, Any, List
from googleapiclient.discovery import build from googleapiclient.discovery import build
from google.oauth2 import service_account from google.oauth2 import service_account
from services.logging_utils import get_service_logger
# Configure logging
logger = logging.getLogger(__name__)
def log_operation(level: str, message: str, context=None, **context_vars): def get_logger(context=None):
"""Centralized logging with context, supporting file and console logging.""" """Get logger for calendar sync operations"""
context_str = ' '.join(f"{k}={v}" for k, v in context_vars.items() if v is not None) return get_service_logger('calendar_sync', context)
full_message = f"{message} {context_str}".strip()
# Use ctx.logger if context is available (Motia III FlowContext)
if context and hasattr(context, 'logger'): def log_operation(level: str, message: str, context=None, **extra):
if level == 'info': """
context.logger.info(full_message) Log calendar sync operations with structured context.
elif level == 'warning':
context.logger.warning(full_message) Args:
elif level == 'error': level: Log level ('debug', 'info', 'warning', 'error')
context.logger.error(full_message) message: Log message
elif level == 'debug': context: FlowContext if available
context.logger.debug(full_message) **extra: Additional key-value pairs to log
"""
logger = get_logger(context)
log_func = getattr(logger, level.lower(), logger.info)
if extra:
extra_str = " | " + " | ".join(f"{k}={v}" for k, v in extra.items())
log_func(message + extra_str)
else: else:
# Fallback to standard logger log_func(message)
if level == 'info':
logger.info(full_message)
elif level == 'warning':
logger.warning(full_message)
elif level == 'error':
logger.error(full_message)
elif level == 'debug':
logger.debug(full_message)
# Also log to console for journalctl visibility
print(f"[{level.upper()}] {full_message}")
async def connect_db(context=None): async def connect_db(context=None):
"""Connect to Postgres DB from environment variables.""" """Connect to Postgres DB from environment variables."""
logger = get_logger(context)
try: try:
conn = await asyncpg.connect( conn = await asyncpg.connect(
host=os.getenv('POSTGRES_HOST', 'localhost'), host=os.getenv('POSTGRES_HOST', 'localhost'),
@@ -57,12 +51,13 @@ async def connect_db(context=None):
) )
return conn return conn
except Exception as e: except Exception as e:
log_operation('error', f"Failed to connect to DB: {e}", context=context) logger.error(f"Failed to connect to DB: {e}")
raise raise
async def get_google_service(context=None): async def get_google_service(context=None):
"""Initialize Google Calendar service.""" """Initialize Google Calendar service."""
logger = get_logger(context)
try: try:
service_account_path = os.getenv('GOOGLE_CALENDAR_SERVICE_ACCOUNT_PATH', 'service-account.json') service_account_path = os.getenv('GOOGLE_CALENDAR_SERVICE_ACCOUNT_PATH', 'service-account.json')
if not os.path.exists(service_account_path): if not os.path.exists(service_account_path):
@@ -75,48 +70,53 @@ async def get_google_service(context=None):
service = build('calendar', 'v3', credentials=creds) service = build('calendar', 'v3', credentials=creds)
return service return service
except Exception as e: except Exception as e:
log_operation('error', f"Failed to initialize Google service: {e}", context=context) logger.error(f"Failed to initialize Google service: {e}")
raise raise
def get_redis_client(context=None): def get_redis_client(context=None) -> redis.Redis:
"""Initialize Redis client for calendar sync operations.""" """Initialize Redis client for calendar sync operations."""
logger = get_logger(context)
try: try:
redis_client = redis.Redis( redis_client = redis.Redis(
host=os.getenv('REDIS_HOST', 'localhost'), host=os.getenv('REDIS_HOST', 'localhost'),
port=int(os.getenv('REDIS_PORT', '6379')), port=int(os.getenv('REDIS_PORT', '6379')),
db=int(os.getenv('REDIS_DB_CALENDAR_SYNC', '2')), db=int(os.getenv('REDIS_DB_CALENDAR_SYNC', '2')),
socket_timeout=int(os.getenv('REDIS_TIMEOUT_SECONDS', '5')) socket_timeout=int(os.getenv('REDIS_TIMEOUT_SECONDS', '5')),
decode_responses=True
) )
return redis_client return redis_client
except Exception as e: except Exception as e:
log_operation('error', f"Failed to initialize Redis client: {e}", context=context) logger.error(f"Failed to initialize Redis client: {e}")
raise raise
async def get_advoware_employees(advoware, context=None): async def get_advoware_employees(advoware, context=None) -> List[Any]:
"""Fetch list of employees from Advoware.""" """Fetch list of employees from Advoware."""
logger = get_logger(context)
try: try:
result = await advoware.api_call('api/v1/advonet/Mitarbeiter', method='GET', params={'aktiv': 'true'}) result = await advoware.api_call('api/v1/advonet/Mitarbeiter', method='GET', params={'aktiv': 'true'})
employees = result if isinstance(result, list) else [] employees = result if isinstance(result, list) else []
log_operation('info', f"Fetched {len(employees)} Advoware employees", context=context) logger.info(f"Fetched {len(employees)} Advoware employees")
return employees return employees
except Exception as e: except Exception as e:
log_operation('error', f"Failed to fetch Advoware employees: {e}", context=context) logger.error(f"Failed to fetch Advoware employees: {e}")
raise raise
def set_employee_lock(redis_client, kuerzel: str, triggered_by: str, context=None) -> bool: def set_employee_lock(redis_client: redis.Redis, kuerzel: str, triggered_by: str, context=None) -> bool:
"""Set lock for employee sync operation.""" """Set lock for employee sync operation."""
logger = get_logger(context)
employee_lock_key = f'calendar_sync_lock_{kuerzel}' employee_lock_key = f'calendar_sync_lock_{kuerzel}'
if redis_client.set(employee_lock_key, triggered_by, ex=1800, nx=True) is None: if redis_client.set(employee_lock_key, triggered_by, ex=1800, nx=True) is None:
log_operation('info', f"Sync already active for {kuerzel}, skipping", context=context) logger.info(f"Sync already active for {kuerzel}, skipping")
return False return False
return True return True
def clear_employee_lock(redis_client, kuerzel: str, context=None): def clear_employee_lock(redis_client: redis.Redis, kuerzel: str, context=None) -> None:
"""Clear lock for employee sync operation and update last-synced timestamp.""" """Clear lock for employee sync operation and update last-synced timestamp."""
logger = get_logger(context)
try: try:
employee_lock_key = f'calendar_sync_lock_{kuerzel}' employee_lock_key = f'calendar_sync_lock_{kuerzel}'
employee_last_synced_key = f'calendar_sync_last_synced_{kuerzel}' employee_last_synced_key = f'calendar_sync_last_synced_{kuerzel}'
@@ -128,6 +128,6 @@ def clear_employee_lock(redis_client, kuerzel: str, context=None):
# Delete the lock # Delete the lock
redis_client.delete(employee_lock_key) redis_client.delete(employee_lock_key)
log_operation('debug', f"Cleared lock and updated last-synced for {kuerzel} to {current_time}", context=context) logger.debug(f"Cleared lock and updated last-synced for {kuerzel} to {current_time}")
except Exception as e: except Exception as e:
log_operation('warning', f"Failed to clear lock and update last-synced for {kuerzel}: {e}", context=context) logger.warning(f"Failed to clear lock and update last-synced for {kuerzel}: {e}")

View File

@@ -32,23 +32,33 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
body={'error': 'Endpoint required as query parameter'} body={'error': 'Endpoint required as query parameter'}
) )
ctx.logger.info("=" * 80)
ctx.logger.info("🔄 ADVOWARE PROXY: DELETE REQUEST")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Endpoint: {endpoint}")
ctx.logger.info("=" * 80)
# Initialize Advoware client # Initialize Advoware client
advoware = AdvowareAPI(ctx) advoware = AdvowareAPI(ctx)
# Forward all query params except 'endpoint' # Forward all query params except 'endpoint'
params = {k: v for k, v in request.query_params.items() if k != 'endpoint'} params = {k: v for k, v in request.query_params.items() if k != 'endpoint'}
ctx.logger.info(f"Proxying DELETE request to Advoware: {endpoint}")
result = await advoware.api_call( result = await advoware.api_call(
endpoint, endpoint,
method='DELETE', method='DELETE',
params=params params=params
) )
ctx.logger.info("✅ Proxy DELETE erfolgreich")
return ApiResponse(status=200, body={'result': result}) return ApiResponse(status=200, body={'result': result})
except Exception as e: except Exception as e:
ctx.logger.error(f"Proxy error: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ADVOWARE PROXY DELETE FEHLER")
ctx.logger.error(f"Endpoint: {request.query_params.get('endpoint', 'N/A')}")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -32,23 +32,33 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
body={'error': 'Endpoint required as query parameter'} body={'error': 'Endpoint required as query parameter'}
) )
ctx.logger.info("=" * 80)
ctx.logger.info("🔄 ADVOWARE PROXY: GET REQUEST")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Endpoint: {endpoint}")
ctx.logger.info("=" * 80)
# Initialize Advoware client # Initialize Advoware client
advoware = AdvowareAPI(ctx) advoware = AdvowareAPI(ctx)
# Forward all query params except 'endpoint' # Forward all query params except 'endpoint'
params = {k: v for k, v in request.query_params.items() if k != 'endpoint'} params = {k: v for k, v in request.query_params.items() if k != 'endpoint'}
ctx.logger.info(f"Proxying GET request to Advoware: {endpoint}")
result = await advoware.api_call( result = await advoware.api_call(
endpoint, endpoint,
method='GET', method='GET',
params=params params=params
) )
ctx.logger.info("✅ Proxy GET erfolgreich")
return ApiResponse(status=200, body={'result': result}) return ApiResponse(status=200, body={'result': result})
except Exception as e: except Exception as e:
ctx.logger.error(f"Proxy error: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ADVOWARE PROXY GET FEHLER")
ctx.logger.error(f"Endpoint: {request.query_params.get('endpoint', 'N/A')}")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -34,6 +34,12 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
body={'error': 'Endpoint required as query parameter'} body={'error': 'Endpoint required as query parameter'}
) )
ctx.logger.info("=" * 80)
ctx.logger.info("🔄 ADVOWARE PROXY: POST REQUEST")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Endpoint: {endpoint}")
ctx.logger.info("=" * 80)
# Initialize Advoware client # Initialize Advoware client
advoware = AdvowareAPI(ctx) advoware = AdvowareAPI(ctx)
@@ -43,7 +49,6 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
# Get request body # Get request body
json_data = request.body json_data = request.body
ctx.logger.info(f"Proxying POST request to Advoware: {endpoint}")
result = await advoware.api_call( result = await advoware.api_call(
endpoint, endpoint,
method='POST', method='POST',
@@ -51,10 +56,15 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
json_data=json_data json_data=json_data
) )
ctx.logger.info("✅ Proxy POST erfolgreich")
return ApiResponse(status=200, body={'result': result}) return ApiResponse(status=200, body={'result': result})
except Exception as e: except Exception as e:
ctx.logger.error(f"Proxy error: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ADVOWARE PROXY POST FEHLER")
ctx.logger.error(f"Endpoint: {request.query_params.get('endpoint', 'N/A')}")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -34,6 +34,12 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
body={'error': 'Endpoint required as query parameter'} body={'error': 'Endpoint required as query parameter'}
) )
ctx.logger.info("=" * 80)
ctx.logger.info("🔄 ADVOWARE PROXY: PUT REQUEST")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Endpoint: {endpoint}")
ctx.logger.info("=" * 80)
# Initialize Advoware client # Initialize Advoware client
advoware = AdvowareAPI(ctx) advoware = AdvowareAPI(ctx)
@@ -43,7 +49,6 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
# Get request body # Get request body
json_data = request.body json_data = request.body
ctx.logger.info(f"Proxying PUT request to Advoware: {endpoint}")
result = await advoware.api_call( result = await advoware.api_call(
endpoint, endpoint,
method='PUT', method='PUT',
@@ -51,10 +56,15 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
json_data=json_data json_data=json_data
) )
ctx.logger.info("✅ Proxy PUT erfolgreich")
return ApiResponse(status=200, body={'result': result}) return ApiResponse(status=200, body={'result': result})
except Exception as e: except Exception as e:
ctx.logger.error(f"Proxy error: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ADVOWARE PROXY PUT FEHLER")
ctx.logger.error(f"Endpoint: {request.query_params.get('endpoint', 'N/A')}")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -0,0 +1,90 @@
"""AI Knowledge Daily Sync - Cron Job"""
from typing import Any
from motia import FlowContext, cron
# Motia step configuration. The cron expression is 6-field
# (sec min hour day month weekday), so "0 0 2 * * *" fires once per day
# at 02:00:00 — matching the handler docstring below.
config = {
    "name": "AI Knowledge Daily Sync",
    "description": "Daily sync of all CAIKnowledge entities (catches missed webhooks, Blake3 verification included)",
    # Flow label this step belongs to.
    "flows": ["aiknowledge-full-sync"],
    "triggers": [
        cron("0 0 2 * * *"),  # Daily at 2:00 AM
    ],
    # Topics this step may enqueue; 'aiknowledge.sync' is consumed by the
    # AI Knowledge Sync queue handler.
    "enqueues": ["aiknowledge.sync"],
}
async def handler(input_data: None, ctx: FlowContext[Any]) -> None:
    """
    Nightly sweep that re-queues every knowledge base still waiting on a sync.

    Queries EspoCRM for active CAIKnowledge entities whose syncStatus marks
    them as unclean or failed, then enqueues one 'aiknowledge.sync' event per
    entity. Blake3 hash verification is always performed downstream (hash
    available from the JunctionData API). Scheduled daily at 02:00:00 by the
    cron trigger in `config`.
    """
    from services.espocrm import EspoCRMAPI
    from services.models import AIKnowledgeActivationStatus, AIKnowledgeSyncStatus

    rule = "=" * 80  # visual separator for log readability

    ctx.logger.info(rule)
    ctx.logger.info("🌙 DAILY AI KNOWLEDGE SYNC STARTED")
    ctx.logger.info(rule)

    crm = EspoCRMAPI(ctx)
    try:
        # Selection criteria: activation status 'active' AND syncStatus in
        # {unclean, failed} — i.e. anything a webhook may have missed.
        active_filter = {
            'type': 'equals',
            'attribute': 'aktivierungsstatus',
            'value': AIKnowledgeActivationStatus.ACTIVE.value
        }
        pending_filter = {
            'type': 'in',
            'attribute': 'syncStatus',
            'value': [
                AIKnowledgeSyncStatus.UNCLEAN.value,
                AIKnowledgeSyncStatus.FAILED.value
            ]
        }
        response = await crm.list_entities(
            'CAIKnowledge',
            where=[active_filter, pending_filter],
            select='id,name,syncStatus',
            max_size=1000  # Adjust if you have more
        )

        pending = response.get('list', [])
        count = len(pending)
        ctx.logger.info(f"📊 Found {count} knowledge bases needing sync")

        if not pending:
            ctx.logger.info("✅ All knowledge bases are synced")
            ctx.logger.info(rule)
            return

        # One queue event per entity (Blake3 verification always enabled
        # in the consuming handler).
        for position, record in enumerate(pending, start=1):
            await ctx.enqueue({
                'topic': 'aiknowledge.sync',
                'data': {
                    'knowledge_id': record['id'],
                    'source': 'daily_cron'
                }
            })
            ctx.logger.info(
                f"📤 [{position}/{count}] Enqueued: {record['name']} "
                f"(syncStatus={record.get('syncStatus')})"
            )

        ctx.logger.info(rule)
        ctx.logger.info(f"✅ Daily sync complete: {count} events enqueued")
        ctx.logger.info(rule)
    except Exception as e:
        ctx.logger.error(rule)
        ctx.logger.error("❌ FULL SYNC FAILED")
        ctx.logger.error(rule)
        ctx.logger.error(f"Error: {e}", exc_info=True)
        raise

View File

@@ -0,0 +1,89 @@
"""AI Knowledge Sync Event Handler"""
from typing import Dict, Any
from redis import Redis
from motia import FlowContext, queue
# Motia step configuration: this step consumes the 'aiknowledge.sync' queue
# topic, which is enqueued by the daily cron step and (per the handler
# docstring) by the CAIKnowledge update webhook.
config = {
    "name": "AI Knowledge Sync",
    "description": "Synchronizes CAIKnowledge entities with XAI Collections",
    # Flow label this step belongs to.
    "flows": ["vmh-aiknowledge"],
    "triggers": [
        queue("aiknowledge.sync")
    ],
}
async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]) -> None:
    """
    Event handler for AI Knowledge synchronization.

    Emitted by:
    - Webhook on CAIKnowledge update
    - Daily full sync cron job

    Args:
        event_data: Event payload with knowledge_id (and optional 'source')
        ctx: Motia context

    Raises:
        RuntimeError: When the per-knowledge lock is already held, so the
            Motia queue retries the event later.
        Exception: Any sync failure is re-raised so Motia retries the event.
    """
    from services.redis_client import RedisClientFactory
    from services.aiknowledge_sync_utils import AIKnowledgeSync

    ctx.logger.info("=" * 80)
    ctx.logger.info("🔄 AI KNOWLEDGE SYNC STARTED")
    ctx.logger.info("=" * 80)

    # Extract data
    knowledge_id = event_data.get('knowledge_id')
    source = event_data.get('source', 'unknown')

    if not knowledge_id:
        # Nothing to sync and nothing useful to retry — drop the event.
        ctx.logger.error("❌ Missing knowledge_id in event data")
        return

    ctx.logger.info(f"📋 Knowledge ID: {knowledge_id}")
    ctx.logger.info(f"📋 Source: {source}")
    ctx.logger.info("=" * 80)

    # Get Redis for locking (strict=False: factory degrades gracefully
    # rather than raising if Redis is unavailable — TODO confirm semantics)
    redis_client = RedisClientFactory.get_client(strict=False)

    # Initialize sync utils
    sync_utils = AIKnowledgeSync(ctx, redis_client)

    # Acquire per-knowledge lock so concurrent events for the same entity
    # don't sync in parallel.
    lock_acquired = await sync_utils.acquire_sync_lock(knowledge_id)
    if not lock_acquired:
        ctx.logger.warn(f"⏸️ Lock already held for {knowledge_id}, skipping")
        ctx.logger.info(" (Will be retried by Motia queue)")
        raise RuntimeError(f"Lock busy for {knowledge_id}")  # Motia will retry

    try:
        # Perform sync (Blake3 hash verification always enabled)
        await sync_utils.sync_knowledge_to_xai(knowledge_id, ctx)

        ctx.logger.info("=" * 80)
        ctx.logger.info("✅ AI KNOWLEDGE SYNC COMPLETED")
        ctx.logger.info("=" * 80)

        # Release lock with success=True
        await sync_utils.release_sync_lock(knowledge_id, success=True)
    except Exception as e:
        ctx.logger.error("=" * 80)
        ctx.logger.error("❌ AI KNOWLEDGE SYNC FAILED")
        ctx.logger.error("=" * 80)
        ctx.logger.error(f"Error: {e}")
        ctx.logger.error(f"Knowledge ID: {knowledge_id}")
        ctx.logger.error("=" * 80)

        # Release lock with failure. Guarded so a failing release (e.g. Redis
        # outage) cannot mask the original sync error — the final `raise`
        # must always re-raise the sync exception for Motia's retry.
        try:
            await sync_utils.release_sync_lock(
                knowledge_id,
                success=False,
                error_message=str(e)
            )
        except Exception as release_error:
            ctx.logger.error(
                f"⚠️ Failed to release lock for {knowledge_id}: {release_error}"
            )

        # Re-raise to let Motia retry
        raise

View File

@@ -11,30 +11,29 @@ Verarbeitet:
""" """
from typing import Dict, Any, Optional from typing import Dict, Any, Optional
from motia import FlowContext from motia import FlowContext, queue
from services.advoware import AdvowareAPI from services.advoware import AdvowareAPI
from services.espocrm import EspoCRMAPI from services.espocrm import EspoCRMAPI
from services.bankverbindungen_mapper import BankverbindungenMapper from services.bankverbindungen_mapper import BankverbindungenMapper
from services.notification_utils import NotificationManager from services.notification_utils import NotificationManager
from services.redis_client import get_redis_client
import json import json
import redis
import os
config = { config = {
"name": "VMH Bankverbindungen Sync Handler", "name": "VMH Bankverbindungen Sync Handler",
"description": "Zentraler Sync-Handler für Bankverbindungen (Webhooks + Cron Events)", "description": "Zentraler Sync-Handler für Bankverbindungen (Webhooks + Cron Events)",
"flows": ["vmh-bankverbindungen"], "flows": ["vmh-bankverbindungen"],
"triggers": [ "triggers": [
{"type": "queue", "topic": "vmh.bankverbindungen.create"}, queue("vmh.bankverbindungen.create"),
{"type": "queue", "topic": "vmh.bankverbindungen.update"}, queue("vmh.bankverbindungen.update"),
{"type": "queue", "topic": "vmh.bankverbindungen.delete"}, queue("vmh.bankverbindungen.delete"),
{"type": "queue", "topic": "vmh.bankverbindungen.sync_check"} queue("vmh.bankverbindungen.sync_check")
], ],
"enqueues": [] "enqueues": []
} }
async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]): async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]) -> None:
"""Zentraler Sync-Handler für Bankverbindungen""" """Zentraler Sync-Handler für Bankverbindungen"""
entity_id = event_data.get('entity_id') entity_id = event_data.get('entity_id')
@@ -47,20 +46,11 @@ async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]):
ctx.logger.info(f"🔄 Bankverbindungen Sync gestartet: {action.upper()} | Entity: {entity_id} | Source: {source}") ctx.logger.info(f"🔄 Bankverbindungen Sync gestartet: {action.upper()} | Entity: {entity_id} | Source: {source}")
# Shared Redis client # Shared Redis client (centralized factory)
redis_host = os.getenv('REDIS_HOST', 'localhost') redis_client = get_redis_client(strict=False)
redis_port = int(os.getenv('REDIS_PORT', '6379'))
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
redis_client = redis.Redis( # APIs initialisieren (mit Context für besseres Logging)
host=redis_host, espocrm = EspoCRMAPI(ctx)
port=redis_port,
db=redis_db,
decode_responses=True
)
# APIs initialisieren
espocrm = EspoCRMAPI()
advoware = AdvowareAPI(ctx) advoware = AdvowareAPI(ctx)
mapper = BankverbindungenMapper() mapper = BankverbindungenMapper()
notification_mgr = NotificationManager(espocrm_api=espocrm, context=ctx) notification_mgr = NotificationManager(espocrm_api=espocrm, context=ctx)
@@ -130,7 +120,7 @@ async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]):
pass pass
async def handle_create(entity_id, betnr, espo_entity, espocrm, advoware, mapper, ctx, redis_client, lock_key): async def handle_create(entity_id, betnr, espo_entity, espocrm, advoware, mapper, ctx, redis_client, lock_key) -> None:
"""Erstellt neue Bankverbindung in Advoware""" """Erstellt neue Bankverbindung in Advoware"""
try: try:
ctx.logger.info(f"🔨 CREATE Bankverbindung in Advoware für Beteiligter {betnr}...") ctx.logger.info(f"🔨 CREATE Bankverbindung in Advoware für Beteiligter {betnr}...")
@@ -176,7 +166,7 @@ async def handle_create(entity_id, betnr, espo_entity, espocrm, advoware, mapper
redis_client.delete(lock_key) redis_client.delete(lock_key)
async def handle_update(entity_id, betnr, advoware_id, espo_entity, espocrm, notification_mgr, ctx, redis_client, lock_key): async def handle_update(entity_id, betnr, advoware_id, espo_entity, espocrm, notification_mgr, ctx, redis_client, lock_key) -> None:
"""Update nicht möglich - Sendet Notification an User""" """Update nicht möglich - Sendet Notification an User"""
try: try:
ctx.logger.warn(f"⚠️ UPDATE: Advoware API unterstützt kein PUT für Bankverbindungen") ctx.logger.warn(f"⚠️ UPDATE: Advoware API unterstützt kein PUT für Bankverbindungen")
@@ -219,7 +209,7 @@ async def handle_update(entity_id, betnr, advoware_id, espo_entity, espocrm, not
redis_client.delete(lock_key) redis_client.delete(lock_key)
async def handle_delete(entity_id, betnr, advoware_id, espo_entity, espocrm, notification_mgr, ctx, redis_client, lock_key): async def handle_delete(entity_id, betnr, advoware_id, espo_entity, espocrm, notification_mgr, ctx, redis_client, lock_key) -> None:
"""Delete nicht möglich - Sendet Notification an User""" """Delete nicht möglich - Sendet Notification an User"""
try: try:
ctx.logger.warn(f"⚠️ DELETE: Advoware API unterstützt kein DELETE für Bankverbindungen") ctx.logger.warn(f"⚠️ DELETE: Advoware API unterstützt kein DELETE für Bankverbindungen")

View File

@@ -25,14 +25,14 @@ config = {
} }
async def handler(input_data: Dict[str, Any], ctx: FlowContext): async def handler(input_data: Dict[str, Any], ctx: FlowContext) -> None:
""" """
Cron-Handler: Findet alle Beteiligte die Sync benötigen und emittiert Events Cron-Handler: Findet alle Beteiligte die Sync benötigen und emittiert Events
""" """
ctx.logger.info("🕐 Beteiligte Sync Cron gestartet") ctx.logger.info("🕐 Beteiligte Sync Cron gestartet")
try: try:
espocrm = EspoCRMAPI() espocrm = EspoCRMAPI(ctx)
# Berechne Threshold für "veraltete" Syncs (24 Stunden) # Berechne Threshold für "veraltete" Syncs (24 Stunden)
threshold = datetime.datetime.now() - datetime.timedelta(hours=24) threshold = datetime.datetime.now() - datetime.timedelta(hours=24)

View File

@@ -11,56 +11,66 @@ Verarbeitet:
""" """
from typing import Dict, Any, Optional from typing import Dict, Any, Optional
from motia import FlowContext from motia import FlowContext, queue
from services.advoware import AdvowareAPI from services.advoware import AdvowareAPI
from services.advoware_service import AdvowareService from services.advoware_service import AdvowareService
from services.espocrm import EspoCRMAPI from services.espocrm import EspoCRMAPI
from services.espocrm_mapper import BeteiligteMapper from services.espocrm_mapper import BeteiligteMapper
from services.beteiligte_sync_utils import BeteiligteSync from services.beteiligte_sync_utils import BeteiligteSync
from services.redis_client import get_redis_client
from services.exceptions import (
AdvowareAPIError,
EspoCRMAPIError,
SyncError,
RetryableError,
is_retryable
)
from services.logging_utils import get_step_logger
import json import json
import redis
import os
config = { config = {
"name": "VMH Beteiligte Sync Handler", "name": "VMH Beteiligte Sync Handler",
"description": "Zentraler Sync-Handler für Beteiligte (Webhooks + Cron Events)", "description": "Zentraler Sync-Handler für Beteiligte (Webhooks + Cron Events)",
"flows": ["vmh-beteiligte"], "flows": ["vmh-beteiligte"],
"triggers": [ "triggers": [
{"type": "queue", "topic": "vmh.beteiligte.create"}, queue("vmh.beteiligte.create"),
{"type": "queue", "topic": "vmh.beteiligte.update"}, queue("vmh.beteiligte.update"),
{"type": "queue", "topic": "vmh.beteiligte.delete"}, queue("vmh.beteiligte.delete"),
{"type": "queue", "topic": "vmh.beteiligte.sync_check"} queue("vmh.beteiligte.sync_check")
], ],
"enqueues": [] "enqueues": []
} }
async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]): async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]) -> None:
"""Zentraler Sync-Handler für Beteiligte""" """
Zentraler Sync-Handler für Beteiligte
Args:
event_data: Event data mit entity_id, action, source
ctx: Motia FlowContext
"""
entity_id = event_data.get('entity_id') entity_id = event_data.get('entity_id')
action = event_data.get('action') action = event_data.get('action')
source = event_data.get('source') source = event_data.get('source')
step_logger = get_step_logger('beteiligte_sync', ctx)
if not entity_id: if not entity_id:
ctx.logger.error("Keine entity_id im Event gefunden") step_logger.error("Keine entity_id im Event gefunden")
return return
ctx.logger.info(f"🔄 Sync-Handler gestartet: {action.upper()} | Entity: {entity_id} | Source: {source}") step_logger.info("=" * 80)
step_logger.info(f"🔄 BETEILIGTE SYNC HANDLER: {action.upper()}")
step_logger.info("=" * 80)
step_logger.info(f"Entity: {entity_id} | Source: {source}")
step_logger.info("=" * 80)
# Shared Redis client for distributed locking # Get shared Redis client (centralized)
redis_host = os.getenv('REDIS_HOST', 'localhost') redis_client = get_redis_client(strict=False)
redis_port = int(os.getenv('REDIS_PORT', '6379'))
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
redis_client = redis.Redis(
host=redis_host,
port=redis_port,
db=redis_db,
decode_responses=True
)
# APIs initialisieren # APIs initialisieren
espocrm = EspoCRMAPI() espocrm = EspoCRMAPI(ctx)
advoware = AdvowareAPI(ctx) advoware = AdvowareAPI(ctx)
sync_utils = BeteiligteSync(espocrm, redis_client, ctx) sync_utils = BeteiligteSync(espocrm, redis_client, ctx)
mapper = BeteiligteMapper() mapper = BeteiligteMapper()
@@ -164,7 +174,7 @@ async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]):
ctx.logger.error(traceback.format_exc()) ctx.logger.error(traceback.format_exc())
async def handle_create(entity_id, espo_entity, espocrm, advoware, sync_utils, mapper, ctx): async def handle_create(entity_id, espo_entity, espocrm, advoware, sync_utils, mapper, ctx) -> None:
"""Erstellt neuen Beteiligten in Advoware""" """Erstellt neuen Beteiligten in Advoware"""
try: try:
ctx.logger.info(f"🔨 CREATE in Advoware...") ctx.logger.info(f"🔨 CREATE in Advoware...")
@@ -223,7 +233,7 @@ async def handle_create(entity_id, espo_entity, espocrm, advoware, sync_utils, m
await sync_utils.release_sync_lock(entity_id, 'failed', str(e), increment_retry=True) await sync_utils.release_sync_lock(entity_id, 'failed', str(e), increment_retry=True)
async def handle_update(entity_id, betnr, espo_entity, espocrm, advoware, sync_utils, mapper, ctx): async def handle_update(entity_id, betnr, espo_entity, espocrm, advoware, sync_utils, mapper, ctx) -> None:
"""Synchronisiert existierenden Beteiligten""" """Synchronisiert existierenden Beteiligten"""
try: try:
ctx.logger.info(f"🔍 Fetch von Advoware betNr={betnr}...") ctx.logger.info(f"🔍 Fetch von Advoware betNr={betnr}...")

View File

@@ -10,27 +10,28 @@ Verarbeitet:
""" """
from typing import Dict, Any from typing import Dict, Any
from motia import FlowContext from motia import FlowContext, queue
from services.espocrm import EspoCRMAPI from services.espocrm import EspoCRMAPI
from services.document_sync_utils import DocumentSync from services.document_sync_utils import DocumentSync
from services.xai_service import XAIService
from services.redis_client import get_redis_client
import hashlib
import json import json
import redis
import os
config = { config = {
"name": "VMH Document Sync Handler", "name": "VMH Document Sync Handler",
"description": "Zentraler Sync-Handler für Documents mit xAI Collections", "description": "Zentraler Sync-Handler für Documents mit xAI Collections",
"flows": ["vmh-documents"], "flows": ["vmh-documents"],
"triggers": [ "triggers": [
{"type": "queue", "topic": "vmh.document.create"}, queue("vmh.document.create"),
{"type": "queue", "topic": "vmh.document.update"}, queue("vmh.document.update"),
{"type": "queue", "topic": "vmh.document.delete"} queue("vmh.document.delete")
], ],
"enqueues": [] "enqueues": []
} }
async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]): async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]) -> None:
"""Zentraler Sync-Handler für Documents""" """Zentraler Sync-Handler für Documents"""
entity_id = event_data.get('entity_id') entity_id = event_data.get('entity_id')
entity_type = event_data.get('entity_type', 'CDokumente') # Default: CDokumente entity_type = event_data.get('entity_type', 'CDokumente') # Default: CDokumente
@@ -50,25 +51,13 @@ async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]):
ctx.logger.info(f"Source: {source}") ctx.logger.info(f"Source: {source}")
ctx.logger.info("=" * 80) ctx.logger.info("=" * 80)
# Shared Redis client for distributed locking # Shared Redis client for distributed locking (centralized factory)
redis_host = os.getenv('REDIS_HOST', 'localhost') redis_client = get_redis_client(strict=False)
redis_port = int(os.getenv('REDIS_PORT', '6379'))
redis_db = int(os.getenv('REDIS_DB_ADVOWARE_CACHE', '1'))
redis_client = redis.Redis( # APIs initialisieren (mit Context für besseres Logging)
host=redis_host, espocrm = EspoCRMAPI(ctx)
port=redis_port,
db=redis_db,
decode_responses=True
)
# APIs initialisieren
espocrm = EspoCRMAPI()
sync_utils = DocumentSync(espocrm, redis_client, ctx) sync_utils = DocumentSync(espocrm, redis_client, ctx)
xai_service = XAIService(ctx)
# TODO: xAI Service wird in nächstem Schritt hinzugefügt
# from services.xai_service import XAIService
# xai_service = XAIService(ctx)
try: try:
# 1. ACQUIRE LOCK (verhindert parallele Syncs) # 1. ACQUIRE LOCK (verhindert parallele Syncs)
@@ -98,10 +87,10 @@ async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]):
# 3. BESTIMME SYNC-AKTION BASIEREND AUF ACTION # 3. BESTIMME SYNC-AKTION BASIEREND AUF ACTION
if action == 'delete': if action == 'delete':
await handle_delete(entity_id, document, sync_utils, ctx, entity_type) await handle_delete(entity_id, document, sync_utils, xai_service, ctx, entity_type)
elif action in ['create', 'update']: elif action in ['create', 'update']:
await handle_create_or_update(entity_id, document, sync_utils, ctx, entity_type) await handle_create_or_update(entity_id, document, sync_utils, xai_service, ctx, entity_type)
else: else:
ctx.logger.warn(f"⚠️ Unbekannte Action: {action}") ctx.logger.warn(f"⚠️ Unbekannte Action: {action}")
@@ -138,7 +127,7 @@ async def handler(event_data: Dict[str, Any], ctx: FlowContext[Any]):
ctx.logger.error(traceback.format_exc()) ctx.logger.error(traceback.format_exc())
async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync_utils: DocumentSync, ctx: FlowContext[Any], entity_type: str = 'CDokumente'): async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync_utils: DocumentSync, xai_service: XAIService, ctx: FlowContext[Any], entity_type: str = 'CDokumente') -> None:
""" """
Behandelt Create/Update von Documents Behandelt Create/Update von Documents
@@ -163,6 +152,42 @@ async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync
if collection_ids: if collection_ids:
ctx.logger.info(f" Collections: {collection_ids}") ctx.logger.info(f" Collections: {collection_ids}")
# ═══════════════════════════════════════════════════════════════
# CHECK: Knowledge Bases mit Status "new" (noch keine Collection)
# ═══════════════════════════════════════════════════════════════
new_knowledge_bases = [cid for cid in collection_ids if cid.startswith('NEW:')]
if new_knowledge_bases:
ctx.logger.info("")
ctx.logger.info("=" * 80)
ctx.logger.info("🆕 DOKUMENT IST MIT KNOWLEDGE BASE(S) VERKNÜPFT (Status: new)")
ctx.logger.info("=" * 80)
for new_kb in new_knowledge_bases:
kb_id = new_kb[4:] # Remove "NEW:" prefix
ctx.logger.info(f"📋 CAIKnowledge {kb_id}")
ctx.logger.info(f" Status: new → Collection muss zuerst erstellt werden")
# Trigger Knowledge Sync
ctx.logger.info(f"📤 Triggering aiknowledge.sync event...")
await ctx.emit('aiknowledge.sync', {
'entity_id': kb_id,
'entity_type': 'CAIKnowledge',
'triggered_by': 'document_sync',
'document_id': entity_id
})
ctx.logger.info(f"✅ Event emitted for {kb_id}")
# Release lock and skip document sync - knowledge sync will handle documents
ctx.logger.info("")
ctx.logger.info("=" * 80)
ctx.logger.info("✅ KNOWLEDGE SYNC GETRIGGERT")
ctx.logger.info(" Document Sync wird übersprungen")
ctx.logger.info(" (Knowledge Sync erstellt Collection und synchronisiert dann Dokumente)")
ctx.logger.info("=" * 80)
await sync_utils.release_sync_lock(entity_id, success=True, entity_type=entity_type)
return
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
# PREVIEW-GENERIERUNG bei neuen/geänderten Dateien # PREVIEW-GENERIERUNG bei neuen/geänderten Dateien
# ═══════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════
@@ -215,17 +240,23 @@ async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync
if preview_data: if preview_data:
ctx.logger.info(f"✅ Preview generated: {len(preview_data)} bytes WebP") ctx.logger.info(f"✅ Preview generated: {len(preview_data)} bytes WebP")
# 5. Upload Preview zu EspoCRM # 5. Upload Preview zu EspoCRM und reset file status
ctx.logger.info(f"📤 Uploading preview to EspoCRM...") ctx.logger.info(f"📤 Uploading preview to EspoCRM...")
await sync_utils.update_sync_metadata( await sync_utils.update_sync_metadata(
entity_id, entity_id,
preview_data=preview_data, preview_data=preview_data,
reset_file_status=True, # Reset status nach Preview-Generierung
entity_type=entity_type entity_type=entity_type
# Keine xaiFileId/collections - nur Preview update
) )
ctx.logger.info(f"✅ Preview uploaded successfully") ctx.logger.info(f"✅ Preview uploaded successfully")
else: else:
ctx.logger.warn("⚠️ Preview-Generierung lieferte keine Daten") ctx.logger.warn("⚠️ Preview-Generierung lieferte keine Daten")
# Auch bei fehlgeschlagener Preview-Generierung Status zurücksetzen
await sync_utils.update_sync_metadata(
entity_id,
reset_file_status=True,
entity_type=entity_type
)
finally: finally:
# Cleanup temp file # Cleanup temp file
@@ -251,6 +282,8 @@ async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync
if not needs_sync: if not needs_sync:
ctx.logger.info("✅ Kein xAI-Sync erforderlich, Lock wird released") ctx.logger.info("✅ Kein xAI-Sync erforderlich, Lock wird released")
# Wenn Preview generiert wurde aber kein xAI sync nötig,
# wurde Status bereits in Preview-Schritt zurückgesetzt
await sync_utils.release_sync_lock(entity_id, success=True, entity_type=entity_type) await sync_utils.release_sync_lock(entity_id, success=True, entity_type=entity_type)
return return
@@ -263,56 +296,53 @@ async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync
ctx.logger.info("🤖 xAI SYNC STARTEN") ctx.logger.info("🤖 xAI SYNC STARTEN")
ctx.logger.info("=" * 80) ctx.logger.info("=" * 80)
# TODO: Implementierung mit xai_service.py # 1. Hole Download-Informationen (falls nicht schon aus Preview-Schritt vorhanden)
ctx.logger.warn("⚠️ xAI Service noch nicht implementiert!") download_info = await sync_utils.get_document_download_info(entity_id, entity_type)
ctx.logger.info("") if not download_info:
ctx.logger.info("TODO: Folgende Schritte werden implementiert:") raise Exception("Konnte Download-Info nicht ermitteln Datei fehlt?")
ctx.logger.info("1. 📥 Download File von EspoCRM")
ctx.logger.info("2. 📤 Upload zu xAI (falls noch kein xaiFileId)")
ctx.logger.info("3. 📚 Add zu Collections")
ctx.logger.info("4. 💾 Update EspoCRM: xaiFileId + xaiCollections")
ctx.logger.info("")
# PLACEHOLDER Implementation: ctx.logger.info(f"📥 Datei: {download_info['filename']} ({download_info['size']} bytes, {download_info['mime_type']})")
#
# # 1. Download File von EspoCRM
# download_info = await sync_utils.get_document_download_info(entity_id)
# if not download_info:
# raise Exception("Konnte Download-Info nicht ermitteln")
#
# # 2. Upload zu xAI (falls noch nicht vorhanden)
# xai_file_id = document.get('xaiFileId')
# if not xai_file_id:
# file_content = await download_file_from_espocrm(download_info)
# xai_file_id = await xai_service.upload_file(file_content, download_info['filename'])
#
# # 3. Add zu Collections
# for collection_id in collection_ids:
# await xai_service.add_file_to_collection(collection_id, xai_file_id)
#
# # 4. Update EspoCRM
# await sync_utils.release_sync_lock(
# entity_id,
# success=True,
# extra_fields={
# 'xaiFileId': xai_file_id,
# 'xaiCollections': collection_ids
# }
# )
# Für jetzt: Success ohne Sync # 2. Download Datei von EspoCRM
espocrm = sync_utils.espocrm
file_content = await espocrm.download_attachment(download_info['attachment_id'])
ctx.logger.info(f"✅ Downloaded {len(file_content)} bytes")
# 3. MD5-Hash berechnen für Change-Detection
file_hash = hashlib.md5(file_content).hexdigest()
ctx.logger.info(f"🔑 MD5: {file_hash}")
# 4. Upload zu xAI
# Immer neu hochladen wenn needs_sync=True (neues File oder Hash geändert)
ctx.logger.info("📤 Uploading to xAI...")
xai_file_id = await xai_service.upload_file(
file_content,
download_info['filename'],
download_info['mime_type']
)
ctx.logger.info(f"✅ xAI file_id: {xai_file_id}")
# 5. Zu allen Ziel-Collections hinzufügen
ctx.logger.info(f"📚 Füge zu {len(collection_ids)} Collection(s) hinzu...")
added_collections = await xai_service.add_to_collections(collection_ids, xai_file_id)
ctx.logger.info(f"✅ In {len(added_collections)}/{len(collection_ids)} Collections eingetragen")
# 6. EspoCRM Metadaten aktualisieren und Lock freigeben
await sync_utils.update_sync_metadata(
entity_id,
xai_file_id=xai_file_id,
collection_ids=added_collections,
file_hash=file_hash,
entity_type=entity_type
)
await sync_utils.release_sync_lock( await sync_utils.release_sync_lock(
entity_id, entity_id,
success=True, success=True,
extra_fields={ entity_type=entity_type
# TODO: Echte xAI-Daten hier einsetzen
# 'xaiFileId': xai_file_id,
# 'xaiCollections': collection_ids
}
) )
ctx.logger.info("=" * 80) ctx.logger.info("=" * 80)
ctx.logger.info("✅ DOCUMENT SYNC ABGESCHLOSSEN (PLACEHOLDER)") ctx.logger.info("✅ DOCUMENT SYNC ABGESCHLOSSEN")
ctx.logger.info("=" * 80) ctx.logger.info("=" * 80)
except Exception as e: except Exception as e:
@@ -322,7 +352,7 @@ async def handle_create_or_update(entity_id: str, document: Dict[str, Any], sync
await sync_utils.release_sync_lock(entity_id, success=False, error_message=str(e)) await sync_utils.release_sync_lock(entity_id, success=False, error_message=str(e))
async def handle_delete(entity_id: str, document: Dict[str, Any], sync_utils: DocumentSync, ctx: FlowContext[Any], entity_type: str = 'CDokumente'): async def handle_delete(entity_id: str, document: Dict[str, Any], sync_utils: DocumentSync, xai_service: XAIService, ctx: FlowContext[Any], entity_type: str = 'CDokumente') -> None:
""" """
Behandelt Delete von Documents Behandelt Delete von Documents
@@ -346,25 +376,15 @@ async def handle_delete(entity_id: str, document: Dict[str, Any], sync_utils: Do
ctx.logger.info(f" xaiFileId: {xai_file_id}") ctx.logger.info(f" xaiFileId: {xai_file_id}")
ctx.logger.info(f" Collections: {xai_collections}") ctx.logger.info(f" Collections: {xai_collections}")
# TODO: Implementierung mit xai_service ctx.logger.info(f"🗑️ Entferne aus {len(xai_collections)} Collection(s)...")
ctx.logger.warn("⚠️ xAI Delete-Operation noch nicht implementiert!") await xai_service.remove_from_collections(xai_collections, xai_file_id)
ctx.logger.info("") ctx.logger.info(f"✅ File aus {len(xai_collections)} Collection(s) entfernt")
ctx.logger.info("TODO: Folgende Schritte werden implementiert:") ctx.logger.info(" (File selbst bleibt in xAI kann in anderen Collections sein)")
ctx.logger.info("1. 🗑️ Remove File aus allen Collections")
ctx.logger.info("2. ⚠️ File NICHT von xAI löschen (kann in anderen Collections sein)")
ctx.logger.info("")
# PLACEHOLDER Implementation:
#
# for collection_id in xai_collections:
# await xai_service.remove_file_from_collection(collection_id, xai_file_id)
#
# ctx.logger.info(f"✅ File aus {len(xai_collections)} Collection(s) entfernt")
await sync_utils.release_sync_lock(entity_id, success=True, entity_type=entity_type) await sync_utils.release_sync_lock(entity_id, success=True, entity_type=entity_type)
ctx.logger.info("=" * 80) ctx.logger.info("=" * 80)
ctx.logger.info("✅ DELETE ABGESCHLOSSEN (PLACEHOLDER)") ctx.logger.info("✅ DELETE ABGESCHLOSSEN")
ctx.logger.info("=" * 80) ctx.logger.info("=" * 80)
except Exception as e: except Exception as e:

View File

@@ -0,0 +1,91 @@
"""VMH Webhook - AI Knowledge Update"""
from typing import Any
from motia import FlowContext, http, ApiRequest, ApiResponse
# Motia step configuration: registers this step as an HTTP webhook endpoint.
config = {
    "name": "VMH Webhook AI Knowledge Update",
    "description": "Receives update webhooks from EspoCRM for CAIKnowledge entities",
    # Flow grouping used by Motia for visualization/routing.
    "flows": ["vmh-aiknowledge"],
    "triggers": [
        # EspoCRM is configured to POST CAIKnowledge update webhooks here.
        http("POST", "/vmh/webhook/aiknowledge/update")
    ],
    # Topics this step may enqueue; consumed by the aiknowledge sync handler.
    "enqueues": ["aiknowledge.sync"],
}
async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
    """
    Webhook handler for CAIKnowledge updates in EspoCRM.

    Triggered when:
    - activationStatus changes
    - syncStatus changes (e.g., set to 'unclean')
    - Documents linked/unlinked

    Args:
        request: Incoming webhook request. The body may be a single dict or
            a list of dicts (array-based webhook batches); only the first
            list item is processed.
        ctx: Motia FlowContext used for logging and enqueuing.

    Returns:
        ApiResponse: 200 with the knowledge_id on success, 400 for an
        empty/invalid payload or missing entity_id, 500 on unexpected errors.
    """
    try:
        ctx.logger.info("=" * 80)
        ctx.logger.info("🔔 AI Knowledge Update Webhook")
        ctx.logger.info("=" * 80)

        # Extract payload
        payload = request.body

        # Handle case where payload is a list (e.g., from array-based webhook)
        if isinstance(payload, list):
            if not payload:
                ctx.logger.error("❌ Empty payload list")
                return ApiResponse(
                    status=400,
                    body={'success': False, 'error': 'Empty payload'}
                )
            payload = payload[0]  # Take first item

        # Ensure payload is a dict
        if not isinstance(payload, dict):
            ctx.logger.error(f"❌ Invalid payload type: {type(payload)}")
            return ApiResponse(
                status=400,
                body={'success': False, 'error': f'Invalid payload type: {type(payload).__name__}'}
            )

        # Validate required fields; EspoCRM payloads may carry the id under
        # either 'entity_id' or plain 'id'.
        knowledge_id = payload.get('entity_id') or payload.get('id')
        entity_type = payload.get('entity_type', 'CAIKnowledge')
        action = payload.get('action', 'update')

        if not knowledge_id:
            ctx.logger.error("❌ Missing entity_id in payload")
            return ApiResponse(
                status=400,
                body={'success': False, 'error': 'Missing entity_id'}
            )

        ctx.logger.info(f"📋 Entity Type: {entity_type}")
        ctx.logger.info(f"📋 Entity ID: {knowledge_id}")
        ctx.logger.info(f"📋 Action: {action}")

        # Enqueue sync event.
        # NOTE(review): other emitters of 'aiknowledge.sync' (document sync)
        # send 'entity_id'/'entity_type', while this webhook originally sent
        # only 'knowledge_id'. Include both key styles so the consumer works
        # regardless of which key it reads — backward-compatible addition.
        await ctx.enqueue({
            'topic': 'aiknowledge.sync',
            'data': {
                'knowledge_id': knowledge_id,
                'entity_id': knowledge_id,
                'entity_type': entity_type,
                'source': 'webhook',
                'action': action
            }
        })

        ctx.logger.info(f"✅ Sync event enqueued for {knowledge_id}")
        ctx.logger.info("=" * 80)

        return ApiResponse(
            status=200,
            body={'success': True, 'knowledge_id': knowledge_id}
        )

    except Exception as e:
        # Top-level boundary: log and convert to a 500 response rather than
        # letting the framework surface a raw traceback to the webhook caller.
        ctx.logger.error(f"❌ Webhook error: {e}")
        return ApiResponse(
            status=500,
            body={'success': False, 'error': str(e)}
        )

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = { config = {
"name": "VMH Webhook Bankverbindungen Create", "name": "VMH Webhook Bankverbindungen Create",
"description": "Empfängt Create-Webhooks von EspoCRM für Bankverbindungen", "description": "Receives create webhooks from EspoCRM for Bankverbindungen",
"flows": ["vmh-bankverbindungen"], "flows": ["vmh-bankverbindungen"],
"triggers": [ "triggers": [
http("POST", "/vmh/webhook/bankverbindungen/create") http("POST", "/vmh/webhook/bankverbindungen/create")
@@ -23,10 +23,13 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Bankverbindungen Create empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: BANKVERBINDUNGEN CREATE")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
if isinstance(payload, list): if isinstance(payload, list):
@@ -36,7 +39,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Create-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} IDs found for create sync")
# Emit events # Emit events
for entity_id in entity_ids: for entity_id in entity_ids:
@@ -50,7 +53,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
} }
}) })
ctx.logger.info(f"VMH Create Webhook verarbeitet: {len(entity_ids)} Events emittiert") ctx.logger.info("VMH Create Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status=200, status=200,
@@ -62,7 +66,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler beim Verarbeiten des VMH Create Webhooks: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ERROR: BANKVERBINDUNGEN CREATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = { config = {
"name": "VMH Webhook Bankverbindungen Delete", "name": "VMH Webhook Bankverbindungen Delete",
"description": "Empfängt Delete-Webhooks von EspoCRM für Bankverbindungen", "description": "Receives delete webhooks from EspoCRM for Bankverbindungen",
"flows": ["vmh-bankverbindungen"], "flows": ["vmh-bankverbindungen"],
"triggers": [ "triggers": [
http("POST", "/vmh/webhook/bankverbindungen/delete") http("POST", "/vmh/webhook/bankverbindungen/delete")
@@ -23,10 +23,13 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Bankverbindungen Delete empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: BANKVERBINDUNGEN DELETE")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
# Sammle alle IDs # Collect all IDs
entity_ids = set() entity_ids = set()
if isinstance(payload, list): if isinstance(payload, list):
@@ -36,7 +39,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Delete-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} IDs found for delete sync")
# Emit events # Emit events
for entity_id in entity_ids: for entity_id in entity_ids:
@@ -50,7 +53,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
} }
}) })
ctx.logger.info(f"VMH Delete Webhook verarbeitet: {len(entity_ids)} Events emittiert") ctx.logger.info("VMH Delete Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status=200, status=200,
@@ -62,7 +66,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler beim Verarbeiten des VMH Delete Webhooks: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ERROR: BANKVERBINDUNGEN DELETE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = { config = {
"name": "VMH Webhook Bankverbindungen Update", "name": "VMH Webhook Bankverbindungen Update",
"description": "Empfängt Update-Webhooks von EspoCRM für Bankverbindungen", "description": "Receives update webhooks from EspoCRM for Bankverbindungen",
"flows": ["vmh-bankverbindungen"], "flows": ["vmh-bankverbindungen"],
"triggers": [ "triggers": [
http("POST", "/vmh/webhook/bankverbindungen/update") http("POST", "/vmh/webhook/bankverbindungen/update")
@@ -23,10 +23,13 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Bankverbindungen Update empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: BANKVERBINDUNGEN UPDATE")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
# Sammle alle IDs # Collect all IDs
entity_ids = set() entity_ids = set()
if isinstance(payload, list): if isinstance(payload, list):
@@ -36,7 +39,7 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Update-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} IDs found for update sync")
# Emit events # Emit events
for entity_id in entity_ids: for entity_id in entity_ids:
@@ -50,7 +53,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
} }
}) })
ctx.logger.info(f"VMH Update Webhook verarbeitet: {len(entity_ids)} Events emittiert") ctx.logger.info("VMH Update Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status=200, status=200,
@@ -62,7 +66,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler beim Verarbeiten des VMH Update Webhooks: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ERROR: BANKVERBINDUNGEN UPDATE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = { config = {
"name": "VMH Webhook Beteiligte Create", "name": "VMH Webhook Beteiligte Create",
"description": "Empfängt Create-Webhooks von EspoCRM für Beteiligte", "description": "Receives create webhooks from EspoCRM for Beteiligte",
"flows": ["vmh-beteiligte"], "flows": ["vmh-beteiligte"],
"triggers": [ "triggers": [
http("POST", "/vmh/webhook/beteiligte/create") http("POST", "/vmh/webhook/beteiligte/create")
@@ -26,10 +26,13 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Beteiligte Create empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: BETEILIGTE CREATE")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
if isinstance(payload, list): if isinstance(payload, list):
@@ -39,9 +42,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Create-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} IDs found for create sync")
# Emit events für Queue-Processing (Deduplizierung erfolgt im Event-Handler via Lock) # Emit events for queue processing (deduplication via lock in event handler)
for entity_id in entity_ids: for entity_id in entity_ids:
await ctx.enqueue({ await ctx.enqueue({
'topic': 'vmh.beteiligte.create', 'topic': 'vmh.beteiligte.create',
@@ -53,7 +56,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
} }
}) })
ctx.logger.info(f"VMH Create Webhook verarbeitet: {len(entity_ids)} Events emittiert") ctx.logger.info("VMH Create Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status=200, status=200,
@@ -65,7 +69,14 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler beim Verarbeiten des VMH Create Webhooks: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ERROR: VMH CREATE WEBHOOK")
ctx.logger.error("=" * 80)
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Entity IDs attempted: {list(entity_ids) if 'entity_ids' in locals() else 'N/A'}")
ctx.logger.error(f"Full Payload: {json.dumps(request.body, indent=2, ensure_ascii=False)}")
ctx.logger.error(f"Timestamp: {datetime.datetime.now().isoformat()}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={ body={

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = { config = {
"name": "VMH Webhook Beteiligte Delete", "name": "VMH Webhook Beteiligte Delete",
"description": "Empfängt Delete-Webhooks von EspoCRM für Beteiligte", "description": "Receives delete webhooks from EspoCRM for Beteiligte",
"flows": ["vmh-beteiligte"], "flows": ["vmh-beteiligte"],
"triggers": [ "triggers": [
http("POST", "/vmh/webhook/beteiligte/delete") http("POST", "/vmh/webhook/beteiligte/delete")
@@ -23,10 +23,13 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Beteiligte Delete empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: BETEILIGTE DELETE")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
if isinstance(payload, list): if isinstance(payload, list):
@@ -36,9 +39,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Delete-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} IDs found for delete sync")
# Emit events für Queue-Processing # Emit events for queue processing
for entity_id in entity_ids: for entity_id in entity_ids:
await ctx.enqueue({ await ctx.enqueue({
'topic': 'vmh.beteiligte.delete', 'topic': 'vmh.beteiligte.delete',
@@ -50,7 +53,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
} }
}) })
ctx.logger.info(f"VMH Delete Webhook verarbeitet: {len(entity_ids)} Events emittiert") ctx.logger.info("VMH Delete Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status=200, status=200,
@@ -62,7 +66,10 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler beim Delete-Webhook: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ERROR: BETEILIGTE DELETE WEBHOOK")
ctx.logger.error(f"Error: {e}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={'error': 'Internal server error', 'details': str(e)} body={'error': 'Internal server error', 'details': str(e)}

View File

@@ -7,7 +7,7 @@ from motia import FlowContext, http, ApiRequest, ApiResponse
config = { config = {
"name": "VMH Webhook Beteiligte Update", "name": "VMH Webhook Beteiligte Update",
"description": "Empfängt Update-Webhooks von EspoCRM für Beteiligte", "description": "Receives update webhooks from EspoCRM for Beteiligte",
"flows": ["vmh-beteiligte"], "flows": ["vmh-beteiligte"],
"triggers": [ "triggers": [
http("POST", "/vmh/webhook/beteiligte/update") http("POST", "/vmh/webhook/beteiligte/update")
@@ -20,16 +20,19 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
""" """
Webhook handler for Beteiligte updates in EspoCRM. Webhook handler for Beteiligte updates in EspoCRM.
Note: Loop-Prevention ist auf EspoCRM-Seite implementiert. Note: Loop prevention is implemented on EspoCRM side.
rowId-Updates triggern keine Webhooks mehr, daher keine Filterung nötig. rowId updates no longer trigger webhooks, so no filtering needed.
""" """
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Beteiligte Update empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: BETEILIGTE UPDATE")
ctx.logger.info("=" * 80)
ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.info(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
ctx.logger.info("=" * 80)
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
if isinstance(payload, list): if isinstance(payload, list):
@@ -39,9 +42,9 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
ctx.logger.info(f"{len(entity_ids)} IDs zum Update-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} IDs found for update sync")
# Emit events für Queue-Processing # Emit events for queue processing
for entity_id in entity_ids: for entity_id in entity_ids:
await ctx.enqueue({ await ctx.enqueue({
'topic': 'vmh.beteiligte.update', 'topic': 'vmh.beteiligte.update',
@@ -53,7 +56,8 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
} }
}) })
ctx.logger.info(f"VMH Update Webhook verarbeitet: {len(entity_ids)} Events emittiert") ctx.logger.info("VMH Update Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status=200, status=200,
@@ -65,7 +69,14 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler beim Verarbeiten des VMH Update Webhooks: {e}") ctx.logger.error("=" * 80)
ctx.logger.error("❌ ERROR: VMH UPDATE WEBHOOK")
ctx.logger.error("=" * 80)
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Entity IDs attempted: {list(entity_ids) if 'entity_ids' in locals() else 'N/A'}")
ctx.logger.error(f"Full Payload: {json.dumps(request.body, indent=2, ensure_ascii=False)}")
ctx.logger.error(f"Timestamp: {datetime.datetime.now().isoformat()}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status=500, status=500,
body={ body={

View File

@@ -1,5 +1,6 @@
"""VMH Webhook - Document Create""" """VMH Webhook - Document Create"""
import json import json
import datetime
from typing import Any from typing import Any
from motia import FlowContext, http, ApiRequest, ApiResponse from motia import FlowContext, http, ApiRequest, ApiResponse
@@ -25,51 +26,64 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Document Create empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: DOCUMENT CREATE")
ctx.logger.info("=" * 80)
ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
entity_type = 'CDokumente' # Default
if isinstance(payload, list): if isinstance(payload, list):
for entity in payload: for entity in payload:
if isinstance(entity, dict) and 'id' in entity: if isinstance(entity, dict) and 'id' in entity:
entity_ids.add(entity['id']) entity_ids.add(entity['id'])
# Extrahiere entityType falls vorhanden # Take entityType from first entity if present
entity_type = entity.get('entityType', 'CDokumente') if entity_type == 'CDokumente':
entity_type = entity.get('entityType', 'CDokumente')
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
entity_type = payload.get('entityType', 'CDokumente') entity_type = payload.get('entityType', 'CDokumente')
ctx.logger.info(f"{len(entity_ids)} Document IDs zum Create-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} document IDs found for create sync")
# Emit events für Queue-Processing (Deduplizierung erfolgt im Event-Handler via Lock) # Emit events for queue processing (deduplication via lock in event handler)
for entity_id in entity_ids: for entity_id in entity_ids:
await ctx.enqueue({ await ctx.enqueue({
'topic': 'vmh.document.create', 'topic': 'vmh.document.create',
'data': { 'data': {
'entity_id': entity_id, 'entity_id': entity_id,
'entity_type': entity_type if 'entity_type' in locals() else 'CDokumente', 'entity_type': entity_type,
'action': 'create', 'action': 'create',
'timestamp': payload[0].get('modifiedAt') if isinstance(payload, list) and payload else None 'timestamp': payload[0].get('modifiedAt') if isinstance(payload, list) and payload else None
} }
}) })
ctx.logger.info("✅ Document Create Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status_code=200, status=200,
body={ body={
'success': True, 'success': True,
'message': f'{len(entity_ids)} Document(s) zum Sync enqueued', 'message': f'{len(entity_ids)} document(s) enqueued for sync',
'entity_ids': list(entity_ids) 'entity_ids': list(entity_ids)
} }
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler im Document Create Webhook: {e}") ctx.logger.error("=" * 80)
ctx.logger.error(f"Payload: {request.body}") ctx.logger.error("❌ ERROR: DOCUMENT CREATE WEBHOOK")
ctx.logger.error("=" * 80)
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Entity IDs attempted: {list(entity_ids) if 'entity_ids' in locals() else 'N/A'}")
ctx.logger.error(f"Full Payload: {json.dumps(request.body, indent=2, ensure_ascii=False)}")
ctx.logger.error(f"Timestamp: {datetime.datetime.now().isoformat()}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status_code=500, status=500,
body={ body={
'success': False, 'success': False,
'error': str(e) 'error': str(e)

View File

@@ -1,5 +1,6 @@
"""VMH Webhook - Document Delete""" """VMH Webhook - Document Delete"""
import json import json
import datetime
from typing import Any from typing import Any
from motia import FlowContext, http, ApiRequest, ApiResponse from motia import FlowContext, http, ApiRequest, ApiResponse
@@ -25,50 +26,64 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Document Delete empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: DOCUMENT DELETE")
ctx.logger.info("=" * 80)
ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
entity_type = 'CDokumente' # Default
if isinstance(payload, list): if isinstance(payload, list):
for entity in payload: for entity in payload:
if isinstance(entity, dict) and 'id' in entity: if isinstance(entity, dict) and 'id' in entity:
entity_ids.add(entity['id']) entity_ids.add(entity['id'])
entity_type = entity.get('entityType', 'CDokumente') # Take entityType from first entity if present
if entity_type == 'CDokumente':
entity_type = entity.get('entityType', 'CDokumente')
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
entity_type = payload.get('entityType', 'CDokumente') entity_type = payload.get('entityType', 'CDokumente')
ctx.logger.info(f"{len(entity_ids)} Document IDs zum Delete-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} document IDs found for delete sync")
# Emit events für Queue-Processing # Emit events for queue processing
for entity_id in entity_ids: for entity_id in entity_ids:
await ctx.enqueue({ await ctx.enqueue({
'topic': 'vmh.document.delete', 'topic': 'vmh.document.delete',
'data': { 'data': {
'entity_id': entity_id, 'entity_id': entity_id,
'entity_type': entity_type if 'entity_type' in locals() else 'CDokumente', 'entity_type': entity_type,
'action': 'delete', 'action': 'delete',
'timestamp': payload[0].get('deletedAt') if isinstance(payload, list) and payload else None 'timestamp': payload[0].get('deletedAt') if isinstance(payload, list) and payload else None
} }
}) })
ctx.logger.info("✅ Document Delete Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status_code=200, status=200,
body={ body={
'success': True, 'success': True,
'message': f'{len(entity_ids)} Document(s) zum Delete enqueued', 'message': f'{len(entity_ids)} document(s) enqueued for deletion',
'entity_ids': list(entity_ids) 'entity_ids': list(entity_ids)
} }
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler im Document Delete Webhook: {e}") ctx.logger.error("=" * 80)
ctx.logger.error(f"Payload: {request.body}") ctx.logger.error("❌ ERROR: DOCUMENT DELETE WEBHOOK")
ctx.logger.error("=" * 80)
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Entity IDs attempted: {list(entity_ids) if 'entity_ids' in locals() else 'N/A'}")
ctx.logger.error(f"Full Payload: {json.dumps(request.body, indent=2, ensure_ascii=False)}")
ctx.logger.error(f"Timestamp: {datetime.datetime.now().isoformat()}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status_code=500, status=500,
body={ body={
'success': False, 'success': False,
'error': str(e) 'error': str(e)

View File

@@ -1,5 +1,6 @@
"""VMH Webhook - Document Update""" """VMH Webhook - Document Update"""
import json import json
import datetime
from typing import Any from typing import Any
from motia import FlowContext, http, ApiRequest, ApiResponse from motia import FlowContext, http, ApiRequest, ApiResponse
@@ -25,50 +26,64 @@ async def handler(request: ApiRequest, ctx: FlowContext[Any]) -> ApiResponse:
try: try:
payload = request.body or [] payload = request.body or []
ctx.logger.info("VMH Webhook Document Update empfangen") ctx.logger.info("=" * 80)
ctx.logger.info("📥 VMH WEBHOOK: DOCUMENT UPDATE")
ctx.logger.info("=" * 80)
ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}") ctx.logger.debug(f"Payload: {json.dumps(payload, indent=2, ensure_ascii=False)}")
# Sammle alle IDs aus dem Batch # Collect all IDs from batch
entity_ids = set() entity_ids = set()
entity_type = 'CDokumente' # Default
if isinstance(payload, list): if isinstance(payload, list):
for entity in payload: for entity in payload:
if isinstance(entity, dict) and 'id' in entity: if isinstance(entity, dict) and 'id' in entity:
entity_ids.add(entity['id']) entity_ids.add(entity['id'])
entity_type = entity.get('entityType', 'CDokumente') # Take entityType from first entity if present
if entity_type == 'CDokumente':
entity_type = entity.get('entityType', 'CDokumente')
elif isinstance(payload, dict) and 'id' in payload: elif isinstance(payload, dict) and 'id' in payload:
entity_ids.add(payload['id']) entity_ids.add(payload['id'])
entity_type = payload.get('entityType', 'CDokumente') entity_type = payload.get('entityType', 'CDokumente')
ctx.logger.info(f"{len(entity_ids)} Document IDs zum Update-Sync gefunden") ctx.logger.info(f"{len(entity_ids)} document IDs found for update sync")
# Emit events für Queue-Processing # Emit events for queue processing
for entity_id in entity_ids: for entity_id in entity_ids:
await ctx.enqueue({ await ctx.enqueue({
'topic': 'vmh.document.update', 'topic': 'vmh.document.update',
'data': { 'data': {
'entity_id': entity_id, 'entity_id': entity_id,
'entity_type': entity_type if 'entity_type' in locals() else 'CDokumente', 'entity_type': entity_type,
'action': 'update', 'action': 'update',
'timestamp': payload[0].get('modifiedAt') if isinstance(payload, list) and payload else None 'timestamp': payload[0].get('modifiedAt') if isinstance(payload, list) and payload else None
} }
}) })
ctx.logger.info("✅ Document Update Webhook processed: "
f"{len(entity_ids)} events emitted")
return ApiResponse( return ApiResponse(
status_code=200, status=200,
body={ body={
'success': True, 'success': True,
'message': f'{len(entity_ids)} Document(s) zum Sync enqueued', 'message': f'{len(entity_ids)} document(s) enqueued for sync',
'entity_ids': list(entity_ids) 'entity_ids': list(entity_ids)
} }
) )
except Exception as e: except Exception as e:
ctx.logger.error(f"Fehler im Document Update Webhook: {e}") ctx.logger.error("=" * 80)
ctx.logger.error(f"Payload: {request.body}") ctx.logger.error("❌ ERROR: DOCUMENT UPDATE WEBHOOK")
ctx.logger.error("=" * 80)
ctx.logger.error(f"Error: {e}")
ctx.logger.error(f"Entity IDs attempted: {list(entity_ids) if 'entity_ids' in locals() else 'N/A'}")
ctx.logger.error(f"Full Payload: {json.dumps(request.body, indent=2, ensure_ascii=False)}")
ctx.logger.error(f"Timestamp: {datetime.datetime.now().isoformat()}")
ctx.logger.error("=" * 80)
return ApiResponse( return ApiResponse(
status_code=500, status=500,
body={ body={
'success': False, 'success': False,
'error': str(e) 'error': str(e)

1033
uv.lock generated

File diff suppressed because it is too large Load Diff