Compare commits: cf2cabd098...main (265 commits)
.claude/AGENTS.go.md — new file (227 lines)

@@ -0,0 +1,227 @@
# AGENTS.go.md — Go Agent Rules

Applies to: `ai-compliance-sdk/` (Go/Gin service)

---

## NON-NEGOTIABLE: Pre-Push Checklist

**BEFORE every `git push`, run ALL of the following from the module root. A single failure blocks the push.**

```bash
# 1. Format (gofmt is non-negotiable — unformatted code fails CI)
gofmt -l . | grep -q . && echo "FORMATTING ERRORS — run: gofmt -w ." && exit 1 || true

# 2. Vet (catches suspicious code that compiles but is likely wrong)
go vet ./...

# 3. Lint (golangci-lint aggregates 50+ linters — the de-facto standard)
golangci-lint run --timeout=5m ./...

# 4. Tests with race detector
go test -race -count=1 ./...

# 5. Build verification (catches import errors, missing implementations)
go build ./...
```

**One-liner pre-push gate:**
```bash
gofmt -l . | grep -q . && exit 1; go vet ./... && golangci-lint run --timeout=5m && go test -race -count=1 ./... && go build ./...
```

### Why each check matters

| Check | Catches | Time |
|-------|---------|------|
| `gofmt` | Formatting violations (CI rejects unformatted code) | <1s |
| `go vet` | Printf format mismatches, unreachable code, shadowed vars | <5s |
| `golangci-lint` | 50+ static analysis checks (errcheck, staticcheck, etc.) | 10-30s |
| `go test -race` | Race conditions (invisible without this flag) | 10-60s |
| `go build` | Import errors, interface mismatches | <5s |

---
## golangci-lint Configuration

Config lives in `.golangci.yml` at the repo root. Minimum required linters:

```yaml
linters:
  enable:
    - errcheck      # unchecked errors are bugs
    - gosimple      # code simplification
    - govet         # go vet findings
    - ineffassign   # useless assignments
    - staticcheck   # advanced static analysis (SA*, S*, QF*)
    - unused        # unused code
    - gofmt         # formatting
    - goimports     # import organization
    - gocritic      # opinionated style checks
    - noctx         # HTTP requests without context
    - bodyclose     # unclosed HTTP response bodies
    - exhaustive    # exhaustive switch on enums
    - wrapcheck     # errors from external packages must be wrapped

linters-settings:
  errcheck:
    check-blank: true  # blank identifier for errors is a bug
  govet:
    enable-all: true

issues:
  max-issues-per-linter: 0
  max-same-issues: 0
```

**Never suppress with `//nolint:` without a comment explaining why it's safe.**

---
## Code Structure (Hexagonal Architecture)

```
ai-compliance-sdk/
├── cmd/
│   └── server/main.go     # thin: parse flags, wire deps, call app.Run()
├── internal/
│   ├── app/               # dependency wiring
│   ├── domain/            # pure business logic, no framework deps
│   ├── ports/             # interfaces (repositories, external services)
│   ├── adapters/
│   │   ├── http/          # Gin handlers (≤30 LOC per handler)
│   │   ├── postgres/      # DB adapters implementing ports
│   │   └── external/      # third-party API clients
│   └── services/          # orchestration between domain + ports
└── pkg/                   # exported, reusable packages
```

**Handler constraint — max 30 lines per handler:**
```go
func (h *RiskHandler) GetRisk(c *gin.Context) {
    id, err := uuid.Parse(c.Param("id"))
    if err != nil {
        c.JSON(http.StatusBadRequest, gin.H{"error": "invalid id"})
        return
    }
    risk, err := h.service.Get(c.Request.Context(), id)
    if err != nil {
        h.handleError(c, err)
        return
    }
    c.JSON(http.StatusOK, risk)
}
```

---
## Error Handling

```go
// REQUIRED: wrap errors with context
if err != nil {
    return fmt.Errorf("get risk %s: %w", id, err)
}

// REQUIRED: define sentinel errors in domain package
var ErrNotFound = errors.New("not found")
var ErrUnauthorized = errors.New("unauthorized")

// REQUIRED: check errors — never use _ for error returns
result, err := service.Do(ctx, input)
if err != nil {
    // handle it
}
```

**`errcheck` linter enforces this — zero tolerance for unchecked errors.**

---
## Testing Requirements

```
internal/
├── domain/
│   ├── risk.go
│   └── risk_test.go        # unit: pure functions, no I/O
├── adapters/
│   ├── http/
│   │   ├── handler.go
│   │   └── handler_test.go # httptest-based, mock service
│   └── postgres/
│       ├── repo.go
│       └── repo_test.go    # integration: testcontainers or real DB
```

**Test naming convention:**
```go
func TestRiskService_Get_ReturnsRisk(t *testing.T) {}
func TestRiskService_Get_NotFound_ReturnsError(t *testing.T) {}
func TestRiskService_Get_DBError_WrapsError(t *testing.T) {}
```

**Table-driven tests are mandatory for functions with multiple cases:**
```go
func TestValidateInput(t *testing.T) {
    tests := []struct {
        name    string
        input   string
        wantErr bool
    }{
        {"valid", "ok", false},
        {"empty", "", true},
        {"too long", strings.Repeat("x", 300), true},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := validateInput(tt.input)
            if (err != nil) != tt.wantErr {
                t.Errorf("got err=%v, wantErr=%v", err, tt.wantErr)
            }
        })
    }
}
```

```bash
# Pre-push: unit tests only (fast)
go test -race -count=1 -run "^TestUnit" ./...

# CI: all tests
go test -race -count=1 -coverprofile=coverage.out ./...
go tool cover -func=coverage.out | grep total
```

---
## Context Propagation

Every function that does I/O (DB, HTTP, file) **must** accept and pass `context.Context` as the first argument:

```go
// REQUIRED
func (r *RiskRepo) Get(ctx context.Context, id uuid.UUID) (*Risk, error) {
    return r.db.QueryRowContext(ctx, query, id).Scan(...)
}

// FORBIDDEN — no context
func (r *RiskRepo) Get(id uuid.UUID) (*Risk, error) { ... }
```

`noctx` linter enforces HTTP client context. Manual review required for DB calls.

---
## Common Pitfalls That Break CI

| Pitfall | Prevention |
|---------|------------|
| Unformatted code | `gofmt -w .` before commit |
| Unchecked error return from `rows.Close()` / `resp.Body.Close()` | `errcheck` + `bodyclose` linters |
| Goroutine leak (goroutine started but never stopped) | context cancellation + leak checks in tests (`-race` only catches data races) |
| Shadowed `err` variable in nested scope | `govet` with the `shadow` check enabled |
| HTTP response body not closed | `bodyclose` linter |
| `interface{}` instead of `any` (Go 1.18+) | `gocritic` |
| Missing context on DB/HTTP calls | `noctx` linter |
| Returning concrete type from constructor instead of interface | breaks testability |
.claude/AGENTS.python.md — new file (157 lines)

@@ -0,0 +1,157 @@
# AGENTS.python.md — Python Agent Rules

Applies to: `backend-compliance/`, `ai-compliance-sdk/` (Python path), `compliance-tts-service/`, `document-crawler/`, `dsms-gateway/` (Python services)

---

## NON-NEGOTIABLE: Pre-Push Checklist

**BEFORE every `git push`, run ALL of the following from the service directory. A single failure blocks the push.**

```bash
# 1. Fast lint (Ruff — catches syntax errors, unused imports, style violations)
ruff check .

# 2. Auto-fix safe issues, then re-check
ruff check --fix . && ruff check .

# 3. Type checking (mypy strict on new modules, standard on legacy)
mypy . --ignore-missing-imports --no-error-summary

# 4. Unit tests only (fast, no external deps)
pytest tests/unit/ -x -q --no-header

# 5. Verify the service starts (catches import errors, missing env vars with defaults)
python -c "import app" 2>/dev/null || python -c "import main" 2>/dev/null || true
```

**One-liner pre-push gate (run from service root):**
```bash
ruff check . && mypy . --ignore-missing-imports --no-error-summary && pytest tests/ -x -q --no-header
```

### Why each check matters

| Check | Catches | Time |
|-------|---------|------|
| `ruff check` | Syntax errors, unused imports, undefined names | <2s |
| `mypy` | Type mismatches, wrong argument types | 5-15s |
| `pytest -x` | Logic errors, regressions | 10-60s |
| import check | Missing packages, circular imports | <1s |

---
## Code Style (Ruff)

Config lives in `pyproject.toml`. Do **not** add per-file `# noqa` suppressions without a comment explaining why.

```toml
[tool.ruff]
line-length = 100
target-version = "py311"

[tool.ruff.lint]
select = ["E", "F", "W", "I", "N", "UP", "B", "C4", "SIM", "TCH"]
ignore = ["E501"]  # line length handled by formatter

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["S101"]  # assert is fine in tests
```

**Blocked patterns:**
- `from module import *` — always name imports explicitly
- Bare `except:` — use `except Exception as e:` at minimum
- `print()` in production code — use `logger`
- Mutable default arguments: `def f(x=[])` → `def f(x=None)` (see the sketch below)
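
The last pattern above is the classic Python footgun, and bare `except:` pairs with the logger rule. A minimal sketch (illustrative function names, not from the codebase):

```python
import logging

logger = logging.getLogger(__name__)


# Blocked: the default list is created once at definition time and shared
# across every call, so state leaks between callers.
def add_tag_bad(tag: str, tags: list[str] = []) -> list[str]:
    tags.append(tag)
    return tags


# Fixed: default to None and create a fresh list per call.
def add_tag(tag: str, tags: list[str] | None = None) -> list[str]:
    if tags is None:
        tags = []
    tags.append(tag)
    return tags


def parse_config_line(raw: str) -> dict[str, str]:
    try:
        key, value = raw.split("=", 1)
    except ValueError as e:  # never a bare `except:`, it swallows everything
        logger.warning("skipping bad config line %r: %s", raw, e)  # logger, not print()
        return {}
    return {key.strip(): value.strip()}
```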

---

## Type Annotations

All new functions **must** have complete type annotations. Use `from __future__ import annotations` for forward references.

```python
# Required
async def get_tenant(tenant_id: str, db: AsyncSession) -> TenantModel | None:
    ...

# Required for complex types
from typing import Sequence

def list_risks(filters: dict[str, str]) -> Sequence[RiskModel]:
    ...
```

**Mypy rules:**
- `--disallow-untyped-defs` on new files
- `--strict` on new modules (not legacy)
- Never use `type: ignore` without a comment

---
## FastAPI-Specific Rules

```python
# Handlers stay thin — delegate to service layer
@router.get("/risks/{risk_id}", response_model=RiskResponse)
async def get_risk(risk_id: UUID, service: RiskService = Depends(get_risk_service)):
    return await service.get(risk_id)  # ≤5 lines per handler

# Always use response_model — never return raw dicts from endpoints
# Always validate input with Pydantic — no manual dict parsing
# Use HTTPException with specific status codes, never bare 500
```

---
## Testing Requirements

```
tests/
├── unit/          # Pure logic tests, no DB/HTTP (run on every push)
├── integration/   # Requires running services (run in CI only)
└── contracts/     # OpenAPI snapshot tests (run on API changes)
```

**Unit test requirements:**
- Every new function → at least one happy-path test
- Every bug fix → regression test that would have caught it
- Mock all I/O: DB calls, HTTP calls, filesystem reads

```bash
# Run unit tests only (fast, for pre-push)
pytest tests/unit/ -x -q

# Run with coverage (for CI)
pytest tests/ --cov=. --cov-report=term-missing --cov-fail-under=70
```

---
## Dependency Management

```bash
# Check new package license before adding
pip show <package> | grep -E "License|Home-page"

# After adding to requirements.txt — verify no GPL/AGPL
pip-licenses --fail-on="GPL;AGPL" 2>/dev/null || echo "Check licenses manually"
```

**Never add:**
- GPL/AGPL licensed packages
- Packages with known CVEs (check with `pip-audit`)
- Dev-only packages (`pytest`, `ruff`) to production requirements

---
## Common Pitfalls That Break CI

| Pitfall | Prevention |
|---------|------------|
| `const x = ...` inside dict literal (wrong language!) | Run ruff before push |
| Pydantic v1 syntax in v2 project | Use `model_config`, not `class Config` (see the sketch below) |
| Sync function called inside async without `run_in_executor` | mypy + async linter |
| Missing `await` on coroutine | mypy catches this |
| `datetime.utcnow()` (deprecated) | Use `datetime.now(timezone.utc)` |
| Bare `except:` swallowing errors silently | ruff B001/E722 catches this |
| Unused imports left in committed code | ruff F401 catches this |
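
To make the Pydantic and `datetime` rows concrete: a minimal sketch, assuming Pydantic v2 (the model name is illustrative):

```python
from datetime import datetime, timezone

from pydantic import BaseModel, ConfigDict, Field


class RiskIn(BaseModel):
    # Pydantic v2: configuration goes into model_config.
    # The v1 `class Config:` style is deprecated in v2 and easy to get wrong.
    model_config = ConfigDict(extra="forbid", str_strip_whitespace=True)

    name: str
    severity: int
    # Timezone-aware timestamp; datetime.utcnow() is deprecated.
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
```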
.claude/AGENTS.typescript.md — new file (186 lines)

@@ -0,0 +1,186 @@
# AGENTS.typescript.md — TypeScript/Next.js Agent Rules

Applies to: `pitch-deck/`, `admin-v2/` (Next.js apps in this repo)

---

## NON-NEGOTIABLE: Pre-Push Checklist

**BEFORE every `git push`, run ALL of the following from the Next.js app directory. A single failure blocks the push.**

```bash
# 1. Type check (catches the class of bug that broke ChatFAB.tsx — const inside object)
npx tsc --noEmit

# 2. Lint (ESLint with TypeScript-aware rules)
npm run lint

# 3. Production build (THE most important check — code can pass lint/types and still fail the build)
npm run build
```

**One-liner pre-push gate:**
```bash
npx tsc --noEmit && npm run lint && npm run build
```

> **Why `npm run build` is mandatory:** Next.js performs additional checks during build (server component boundaries, missing env vars referenced in code, RSC/client component violations) that `tsc` and ESLint alone do not catch. The ChatFAB syntax error (`const` inside object literal) is exactly the kind of error caught only by build.

### Why each check matters

| Check | Catches | Time |
|-------|---------|------|
| `tsc --noEmit` | Type errors, wrong prop types, missing members | 5-20s |
| `eslint` | React hooks rules, import order, unused vars | 5-15s |
| `next build` | Server/client boundary violations, missing deps, syntax errors in JSX, env var issues | 30-120s |

---
## TypeScript Configuration

`tsconfig.json` must have strict mode enabled:

```json
{
  "compilerOptions": {
    "strict": true,
    "noImplicitAny": true,
    "strictNullChecks": true,
    "noUncheckedIndexedAccess": true,
    "exactOptionalPropertyTypes": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noFallthroughCasesInSwitch": true
  }
}
```

**Never use `// @ts-ignore` or `// @ts-expect-error` without a comment explaining why it's unavoidable.**

---
## ESLint Configuration

```json
{
  "extends": [
    "next/core-web-vitals",
    "plugin:@typescript-eslint/recommended-type-checked"
  ],
  "rules": {
    "@typescript-eslint/no-explicit-any": "error",
    "@typescript-eslint/no-unused-vars": "error",
    "@typescript-eslint/no-floating-promises": "error",
    "@typescript-eslint/await-thenable": "error",
    "react-hooks/exhaustive-deps": "error",
    "no-console": "warn"
  }
}
```

**`@typescript-eslint/no-floating-promises`** — catches `await`-less async calls that silently swallow errors.
**`react-hooks/exhaustive-deps`** — catches missing deps in `useEffect`/`useCallback` (source of stale closure bugs).

---
## Next.js 15 Rules (App Router)

### Server vs Client boundary

```typescript
// Server Component (default) — no 'use client' needed
// Can: fetch data, access DB, read env vars, import server-only packages
async function Page() {
  const data = await fetchData() // direct async/await
  return <ClientComponent data={data} />
}

// Client Component — must have 'use client' at top
'use client'
// Can: use hooks, handle events, access browser APIs
// Cannot: import server-only packages (nodemailer, fs, db pool)
```

**Common violation:** Importing `lib/email.ts` (which imports nodemailer) from a client component → use `lib/email-templates.ts` instead.

### Route Handler typing

```typescript
import { NextResponse } from 'next/server'

// Always type request and use NextResponse
export async function GET(request: Request): Promise<NextResponse> {
  const { searchParams } = new URL(request.url)
  return NextResponse.json({ data })
}
```

### Environment variables

```typescript
// Server-only env vars: access directly
const secret = process.env.PITCH_ADMIN_SECRET // fine in server components

// Client env vars: must be prefixed NEXT_PUBLIC_
const url = process.env.NEXT_PUBLIC_API_URL // accessible in browser

// Never access server-only env vars in 'use client' components
```

---
## Component Architecture

```
app/
├── (route-group)/
│   ├── page.tsx              # Server Component — data fetching
│   └── _components/          # Colocated components for this route
│       ├── ClientThing.tsx   # 'use client' when needed
│       └── ServerThing.tsx   # Server by default
components/
└── ui/                       # Shared presentational components
lib/
├── server-only-module.ts     # import 'server-only' at top
└── shared-module.ts          # safe for both server and client
```

**Rules:**
- Push `'use client'` boundary as deep as possible (toward leaves)
- Never import server-only modules from client components
- Colocate `_components/` and `_hooks/` per route when they're route-specific

---
## Testing Requirements

```bash
# Type check (fastest, run first)
npx tsc --noEmit

# Unit tests (Vitest)
npx vitest run

# E2E tests (Playwright — CI only, requires running server)
npx playwright test
```

**Test every:**
- Custom hook (`usePresenterMode`, `useSlideNavigation`)
- Utility function (`lib/auth.ts` helpers, `lib/email-templates.ts`)
- API route handler (mock DB, assert response shape)

---
## Common Pitfalls That Break CI

| Pitfall | Prevention |
|---------|------------|
| `const x = ...` inside object literal | `tsc --noEmit` + `npm run build` |
| Server-only import in client component | `import 'server-only'` guard + ESLint |
| Missing `await` on async function call | `@typescript-eslint/no-floating-promises` |
| `useEffect` with missing dependency | `react-hooks/exhaustive-deps` error |
| `any` type hiding type errors | `@typescript-eslint/no-explicit-any` error |
| Unused variable left after refactor | `noUnusedLocals` in tsconfig |
| `process.env.SECRET` in client component | Next.js build error |
| Forgetting `export default` on page component | Next.js build error |
| Calling server action from server component | Must use route handler instead |
| `jose` full import in Edge Runtime | Use specific subpath: `jose/jwt/verify` |
@@ -2,28 +2,53 @@

## Development Environment (IMPORTANT - ALWAYS READ FIRST)

- ### Two-Machine Setup
+ ### Two-Machine Setup + Orca

| Device | Role | Tasks |
|--------|------|-------|
| **MacBook** | Development | Claude terminal, code development, browser (frontend tests) |
- | **Mac Mini** | Server | Docker, all services, tests, builds, deployment |
+ | **Mac Mini** | Local server | Docker for local dev/tests (NOT for production!) |
+ | **Orca** | Production | Automatic build + deploy on push to gitea |

- **IMPORTANT:** Code is edited directly on the MacBook in this repo. Docker and the services run on the Mac Mini.
+ **IMPORTANT:** Code is edited directly on the MacBook in this repo. Production deployment runs automatically via Orca.

- ### Development Workflow
+ ### Development Workflow (CI/CD — Orca)

```bash
# 1. Edit code on the MacBook (this directory)
- # 2. Commit and push:
- git push origin main && git push gitea main
+ # 2. Commit and push to BOTH remotes:
+ git push origin main

- # 3. Pull on the Mac Mini and rebuild the containers:
+ # 3. DONE! A push to gitea automatically triggers:
+ #    - Gitea Actions: tests
+ #    - Orca: build → deploy
```

**NEVER** click "Redeploy" manually in Orca — Gitea Actions triggers Orca automatically.
**ALWAYS push to `main`** — both origin and gitea.

### Post-Push Deploy Monitoring (MANDATORY after every push to gitea)

**WHENEVER Claude pushes to gitea, deploy monitoring MUST run automatically afterwards:**

1. Tell the user immediately: "Deploy started, I am monitoring the status..."
2. Poll the health checks in the background (every 20 seconds, max 5 minutes); see the sketch after this list:
```bash
curl -sf https://api-dev.breakpilot.ai/health   # Compliance Backend
curl -sf https://sdk-dev.breakpilot.ai/health   # AI SDK
```
3. As soon as ALL endpoints are healthy, report to the user in chat:
   **"Deploy finished! You can test now."**
4. If still not healthy after 5 minutes → error message pointing to the Orca logs.
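
The polling loop from the steps above, as a minimal Python sketch. The URLs, the 20-second interval, and the 5-minute cap come from the checklist; everything else is illustrative:

```python
import time
import urllib.request

HEALTH_URLS = [
    "https://api-dev.breakpilot.ai/health",  # Compliance Backend
    "https://sdk-dev.breakpilot.ai/health",  # AI SDK
]


def is_healthy(url: str) -> bool:
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            return resp.status == 200
    except OSError:  # URLError, HTTP errors, and timeouts all derive from OSError
        return False


def wait_for_deploy(interval: int = 20, timeout: int = 300) -> bool:
    """Poll all health endpoints every `interval` seconds for up to `timeout` seconds."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if all(is_healthy(url) for url in HEALTH_URLS):
            print("Deploy finished! You can test now.")
            return True
        time.sleep(interval)
    print("Not healthy after 5 minutes: check the Orca logs.")
    return False
```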

### Local Development (Mac Mini — optional, dev/tests only)

```bash
ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-core && git pull --no-rebase origin main"
ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-core && /usr/local/bin/docker compose build --no-cache <service> && /usr/local/bin/docker compose up -d <service>"
```

- ### SSH Connection (for Docker/tests)
+ ### SSH Connection (for local Docker/tests)

```bash
ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-core && <cmd>"
@@ -51,6 +76,14 @@ networks:
  name: breakpilot-network  # Fixed name, no auto-prefix!
```

### Deployment Model

| Repo | Deployment | Trigger |
|------|-----------|---------|
| **breakpilot-core** | Orca (automatic) | Push to gitea main |
| **breakpilot-compliance** | Orca (automatic) | Push to gitea main |
| **breakpilot-lehrer** | Mac Mini (local) | Manual docker compose |

---

## Main URLs (via Nginx Reverse Proxy)
@@ -161,7 +194,7 @@ networks:
| `compliance` | Compliance | compliance_*, dsr, gdpr, sdk_tenants, consent_admin |

```bash
- # DB access
+ # DB access (local)
ssh macmini "docker exec bp-core-postgres psql -U breakpilot -d breakpilot_db"
```

@@ -185,15 +218,45 @@ breakpilot-core/
├── gitea/               # Gitea config
├── docs-src/            # MkDocs sources
├── mkdocs.yml           # MkDocs config
+ ├── control-pipeline/    # RAG/Control Pipeline (port 8098)
├── scripts/             # Helper scripts
└── docker-compose.yml   # Main compose (28+ services)
```

---

## Control Pipeline (IMPORTANT)

**Since 2026-04-09 the entire RAG/Control pipeline lives in the core repo** (`control-pipeline/`), NOT in the compliance repo anymore. All work on the pipeline (Pass 0a/0b, BatchDedup, Control Generator, Enrichment) happens exclusively here.

- **Port:** 8098
- **Container:** bp-core-control-pipeline
- **DB:** writes into the `compliance` schema of the shared PostgreSQL
- **The compliance repo is NOT used for pipeline changes**

```bash
# Container on the Mac Mini
ssh macmini "cd ~/Projekte/breakpilot-core && /usr/local/bin/docker compose build --no-cache control-pipeline && /usr/local/bin/docker compose up -d --no-deps control-pipeline"

# Health
ssh macmini "/usr/local/bin/docker exec bp-core-control-pipeline curl -sf http://127.0.0.1:8098/health"

# Logs
ssh macmini "/usr/local/bin/docker logs -f bp-core-control-pipeline"
```

---

## Frequent Commands

- ### Docker
+ ### Deployment (CI/CD — the standard path)

```bash
# Commit and push → Orca deploys automatically:
git push origin main
```

### Local Docker Commands (Mac Mini — dev/tests only)

```bash
# Start all core services
@@ -211,35 +274,50 @@ ssh macmini "/usr/local/bin/docker ps --filter name=bp-core"

**IMPORTANT:** The Docker path on the Mac Mini is `/usr/local/bin/docker` (not in the default SSH PATH).

### Start All 3 Projects

```bash
# 1. Core (MUST come first!)
ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-core && /usr/local/bin/docker compose up -d"
# Wait for health:
ssh macmini "curl -sf http://127.0.0.1:8099/health"

# 2. Lehrer
ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-lehrer && /usr/local/bin/docker compose up -d"

# 3. Compliance
ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-compliance && /usr/local/bin/docker compose up -d"
```

### Git

```bash
# Push to BOTH remotes (MANDATORY!):
- ssh macmini "cd /Users/benjaminadmin/Projekte/breakpilot-core && git push all main"
+ git push origin main

# Remotes:
#   origin: local Gitea (macmini:3003)
#   gitea:  gitea.meghsakha.com
#   all:    both at once
```

---

## Pre-Push Checks (MANDATORY — BEFORE EVERY PUSH)

> Full detail: `.claude/rules/pre-push-checks.md` | Stack rules: `AGENTS.python.md`, `AGENTS.go.md`, `AGENTS.typescript.md`

**NEVER push without these checks. CI failures block the entire deploy.**

### Python (backend-core, rag-service, embedding-service, control-pipeline)

```bash
cd <service-dir>
ruff check . && mypy . --ignore-missing-imports --no-error-summary && pytest tests/ -x -q --no-header
```

### Go (consent-service, billing-service)

```bash
cd <service-dir>
gofmt -l . | grep -q . && exit 1; go vet ./... && golangci-lint run --timeout=5m && go test -race ./... && go build ./...
```

### TypeScript/Next.js (pitch-deck, admin-v2)

```bash
cd pitch-deck  # or admin-v2
npx tsc --noEmit && npm run lint && npm run build
```

> `npm run build` is MANDATORY — `tsc` alone is not enough. Syntax errors like `const` inside an object literal are only caught by the build.

---

## Core Principles

### 1. Open Source Policy
.claude/rules/pre-push-checks.md — new file (74 lines)

@@ -0,0 +1,74 @@
# Pre-Push Checks (MANDATORY)

## Rule

**NEVER push to any remote without first running and confirming ALL checks pass for every changed language stack.**

This rule exists because CI failures break the deploy pipeline for everyone and waste ~5 minutes per failed build. A 60-second local check prevents that.

---

## Quick Reference by Stack

### Python (backend-compliance, ai-compliance-sdk, compliance-tts-service)

```bash
cd <service-dir>
ruff check . && mypy . --ignore-missing-imports --no-error-summary && pytest tests/ -x -q --no-header
```

Blocks on: syntax errors, type errors, failing tests.

### Go (ai-compliance-sdk Go path)

```bash
cd <service-dir>
gofmt -l . | grep -q . && exit 1; go vet ./... && golangci-lint run --timeout=5m && go test -race ./... && go build ./...
```

Blocks on: formatting, vet findings, lint violations, test failures, build errors.

### TypeScript/Next.js (admin-compliance, developer-portal)

```bash
cd <nextjs-app-dir>
npx tsc --noEmit && npm run lint && npm run build
```

Blocks on: type errors, lint violations, **build failures**.

> `npm run build` is mandatory — code that passes `tsc` still fails `next build` more often than you'd expect (server/client boundary violations, env var issues, JSX syntax errors).

---

## What Claude Must Do Before Every Push

1. Identify which services/apps were changed in this task
2. Run the appropriate gate command(s) from the quick reference above (see the sketch after this list)
3. If any check fails: fix it, re-run, confirm green
4. Only then run `git push origin main`

**No exceptions.** A push that skips pre-push checks and breaks CI is worse than a delayed push.
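
A minimal sketch of steps 1 and 2: mapping changed top-level directories to their stack gate. The gate commands are quoted verbatim from the quick reference; the directory-to-stack map and the script itself are illustrative:

```python
import subprocess
import sys

# Gate commands, verbatim from the quick reference above.
GATES = {
    "python": "ruff check . && mypy . --ignore-missing-imports --no-error-summary && pytest tests/ -x -q --no-header",
    "go": "gofmt -l . | grep -q . && exit 1; go vet ./... && golangci-lint run --timeout=5m && go test -race ./... && go build ./...",
    "ts": "npx tsc --noEmit && npm run lint && npm run build",
}

# Illustrative mapping of changed directories to stacks.
STACK_BY_DIR = {
    "backend-compliance": "python",
    "compliance-tts-service": "python",
    "ai-compliance-sdk": "go",
    "admin-compliance": "ts",
    "developer-portal": "ts",
}


def changed_dirs() -> set[str]:
    """Top-level directories touched since origin/main."""
    out = subprocess.run(
        ["git", "diff", "--name-only", "origin/main...HEAD"],
        capture_output=True, text=True, check=True,
    ).stdout
    return {line.split("/", 1)[0] for line in out.splitlines() if "/" in line}


def main() -> None:
    for directory in sorted(changed_dirs()):
        stack = STACK_BY_DIR.get(directory)
        if stack is None:
            continue
        print(f"=== gate: {directory} ({stack}) ===")
        if subprocess.run(GATES[stack], shell=True, cwd=directory).returncode != 0:
            sys.exit(f"Gate failed for {directory}: fix it before pushing.")


if __name__ == "__main__":
    main()
```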

---

## CI vs Local Checks

| Stage | Where | What |
|-------|-------|------|
| Pre-push (local) | Claude runs | Lint + type check + unit tests + build |
| CI (Gitea Actions) | Automatic on push | Same + integration tests + contract tests |
| Deploy (Orca) | Automatic after CI | Docker build + health check |

Local checks catch 90% of CI failures in seconds. CI is the safety net, not the first line of defense.

---

## Failures That Were Caused by Skipping Pre-Push Checks

- `ChatFAB.tsx`: `const textLang` inside fetch object literal — caught by `tsc --noEmit` and `npm run build`
- `nodemailer` webpack error: server-only import in client component — caught by `npm run build`
- `jose` Edge Runtime error: full package import — caught by `npm run build`
- `main.py` `<en>` tags spoken: missing `import re` — caught by `python -c "import main"`

These all caused a broken deploy. Each would have been caught in <60 seconds locally.
.gitea/workflows/build-pitch-deck.yml — new file (66 lines)

@@ -0,0 +1,66 @@
# Build + push pitch-deck Docker image to registry.meghsakha.com
# and trigger orca redeploy on every push to main that touches pitch-deck/.
#
# Requires Gitea Actions secret: ORCA_WEBHOOK_SECRET
# (must match the `secret` field in ~/.orca/webhooks.json on the orca master)

name: Build pitch-deck

on:
  push:
    branches: [main]
    paths:
      - 'pitch-deck/**'

jobs:
  build-push-deploy:
    runs-on: docker
    container:
      image: docker:27-cli
    steps:
      - name: Checkout
        run: |
          apk add --no-cache git openssl curl
          git clone --depth 1 --branch ${GITHUB_REF_NAME} ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git .

      - name: Login to registry
        env:
          REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
          REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
        run: |
          echo "$REGISTRY_PASSWORD" | docker login registry.meghsakha.com -u "$REGISTRY_USERNAME" --password-stdin

      - name: Build image
        run: |
          cd pitch-deck
          SHORT_SHA=$(git rev-parse --short HEAD)
          docker build \
            --build-arg GIT_SHA=${SHORT_SHA} \
            -t registry.meghsakha.com/breakpilot/pitch-deck:latest \
            -t registry.meghsakha.com/breakpilot/pitch-deck:${SHORT_SHA} \
            .

      - name: Push to registry
        run: |
          SHORT_SHA=$(git rev-parse --short HEAD)
          docker push registry.meghsakha.com/breakpilot/pitch-deck:latest
          docker push registry.meghsakha.com/breakpilot/pitch-deck:${SHORT_SHA}
          echo "Pushed :latest + :${SHORT_SHA}"

      - name: Trigger orca redeploy
        env:
          ORCA_WEBHOOK_SECRET: ${{ secrets.ORCA_WEBHOOK_SECRET }}
          ORCA_WEBHOOK_URL: http://46.225.100.82:6880/api/v1/webhooks/github
        run: |
          SHA=$(git rev-parse HEAD)
          PAYLOAD="{\"ref\":\"refs/heads/main\",\"repository\":{\"full_name\":\"${GITHUB_REPOSITORY}\"},\"head_commit\":{\"id\":\"$SHA\",\"message\":\"ci: pitch-deck image build\"}}"
          SIG=$(printf '%s' "$PAYLOAD" | openssl dgst -sha256 -hmac "$ORCA_WEBHOOK_SECRET" -r | awk '{print $1}')
          curl -sSf -k \
            -X POST \
            -H "Content-Type: application/json" \
            -H "X-GitHub-Event: push" \
            -H "X-Hub-Signature-256: sha256=$SIG" \
            -d "$PAYLOAD" \
            "$ORCA_WEBHOOK_URL" \
            || { echo "Orca redeploy failed"; exit 1; }
          echo "Orca redeploy triggered"
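
The signing step above mirrors GitHub's webhook scheme: a hex HMAC-SHA256 of the raw request body, sent as `X-Hub-Signature-256: sha256=<digest>`. For reference, a receiver would verify it roughly like this (a sketch, not Orca's actual code):

```python
import hashlib
import hmac


def verify_webhook_signature(secret: str, body: bytes, header: str) -> bool:
    """Check an X-Hub-Signature-256 header against the raw request body."""
    expected = "sha256=" + hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
    # Constant-time comparison to avoid leaking the digest via timing.
    return hmac.compare_digest(expected, header)
```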
@@ -140,117 +140,6 @@ jobs:
          python -m pytest tests/bqas/ -v --tb=short || true

   # ========================================
-  # Build & deploy on Hetzner (main only, no PRs)
+  # Deploys now handled by per-service workflows (e.g. build-pitch-deck.yml)
+  # which trigger orca webhooks directly after building + pushing the image.
   # ========================================

-  deploy-hetzner:
-    runs-on: docker
-    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
-    needs:
-      - test-go-consent
-    container: docker:27-cli
-    steps:
-      - name: Deploy
-        run: |
-          set -euo pipefail
-          DEPLOY_DIR="/opt/breakpilot-core"
-          COMPOSE_FILES="-f docker-compose.yml -f docker-compose.hetzner.yml"
-          COMMIT_SHA="${GITHUB_SHA:-unknown}"
-          SHORT_SHA="${COMMIT_SHA:0:8}"
-          REPO_URL="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git"
-
-          # Services to be deployed
-          SERVICES="postgres valkey qdrant minio ollama mailpit embedding-service rag-service backend-core consent-service health-aggregator"
-
-          echo "=== BreakPilot Core Deploy ==="
-          echo "Commit: ${SHORT_SHA}"
-          echo "Deploy Dir: ${DEPLOY_DIR}"
-          echo "Services: ${SERVICES}"
-          echo ""
-
-          # 1. Create/update the repo on the host via helper container
-          echo "=== Updating code on host ==="
-          docker run --rm \
-            -v "${DEPLOY_DIR}:${DEPLOY_DIR}" \
-            --entrypoint sh \
-            alpine/git:latest \
-            -c "
-              if [ ! -d '${DEPLOY_DIR}/.git' ]; then
-                echo 'Initial clone into ${DEPLOY_DIR}...'
-                git clone '${REPO_URL}' '${DEPLOY_DIR}'
-              else
-                cd '${DEPLOY_DIR}'
-                git fetch origin main
-                git reset --hard origin/main
-              fi
-            "
-          echo "Code updated to ${SHORT_SHA}"
-
-          # 2. Ensure .env exists
-          docker run --rm -v "${DEPLOY_DIR}:${DEPLOY_DIR}" alpine \
-            sh -c "
-              if [ ! -f '${DEPLOY_DIR}/.env' ]; then
-                echo 'WARNING: ${DEPLOY_DIR}/.env is missing!'
-                echo 'Creating .env from .env.example with defaults...'
-                if [ -f '${DEPLOY_DIR}/.env.example' ]; then
-                  cp '${DEPLOY_DIR}/.env.example' '${DEPLOY_DIR}/.env'
-                  echo '.env created from .env.example'
-                else
-                  echo 'No .env.example found — services start with defaults'
-                fi
-              else
-                echo '.env present'
-              fi
-            "
-
-          # 3. Create the shared network (if it does not exist yet)
-          docker network create breakpilot-network 2>/dev/null || true
-
-          # 4. Build + deploy via helper container
-          echo ""
-          echo "=== Building + Deploying ==="
-          docker run --rm \
-            -v /var/run/docker.sock:/var/run/docker.sock \
-            -v "${DEPLOY_DIR}:${DEPLOY_DIR}" \
-            -w "${DEPLOY_DIR}" \
-            docker:27-cli \
-            sh -c "
-              set -e
-              COMPOSE_FILES='-f docker-compose.yml -f docker-compose.hetzner.yml'
-
-              echo '=== Building Docker Images ==='
-              docker compose \${COMPOSE_FILES} build --parallel \
-                backend-core consent-service rag-service embedding-service health-aggregator
-
-              echo ''
-              echo '=== Starting infrastructure ==='
-              docker compose \${COMPOSE_FILES} up -d postgres valkey qdrant minio mailpit
-
-              echo 'Waiting for DB + cache...'
-              sleep 10
-
-              echo ''
-              echo '=== Starting Ollama + pulling bge-m3 ==='
-              docker compose \${COMPOSE_FILES} up -d ollama
-              sleep 5
-
-              # Pull the bge-m3 model (first time only, ~670MB)
-              echo 'Pulling bge-m3 model (if not already present)...'
-              docker exec bp-core-ollama ollama pull bge-m3 2>&1 || echo 'WARNING: bge-m3 pull failed (will be retried later)'
-
-              echo ''
-              echo '=== Starting application services ==='
-              docker compose \${COMPOSE_FILES} up -d \
-                embedding-service rag-service backend-core consent-service health-aggregator
-
-              echo ''
-              echo '=== Health Checks ==='
-              sleep 15
-              for svc in bp-core-postgres bp-core-valkey bp-core-qdrant bp-core-ollama bp-core-embedding-service bp-core-rag-service bp-core-backend bp-core-consent-service bp-core-health; do
-                STATUS=\$(docker inspect --format='{{.State.Status}}' \"\${svc}\" 2>/dev/null || echo 'not found')
-                echo \"\${svc}: \${STATUS}\"
-              done
-            "
-
-          echo ""
-          echo "=== Deploy finished: ${SHORT_SHA} ==="
@@ -1,27 +0,0 @@
-name: Deploy to Coolify
-
-on:
-  push:
-    branches:
-      - coolify
-
-jobs:
-  deploy:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Deploy via Coolify API
-        run: |
-          echo "Deploying breakpilot-core to Coolify..."
-          HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" \
-            -X POST \
-            -H "Authorization: Bearer ${{ secrets.COOLIFY_API_TOKEN }}" \
-            -H "Content-Type: application/json" \
-            -d '{"uuid": "${{ secrets.COOLIFY_RESOURCE_UUID }}", "force_rebuild": true}' \
-            "${{ secrets.COOLIFY_BASE_URL }}/api/v1/deploy")
-
-          echo "HTTP Status: $HTTP_STATUS"
-          if [ "$HTTP_STATUS" -ne 200 ] && [ "$HTTP_STATUS" -ne 201 ]; then
-            echo "Deployment failed with status $HTTP_STATUS"
-            exit 1
-          fi
-          echo "Deployment triggered successfully!"
.gitignore — vendored (+1 line)

@@ -7,6 +7,7 @@
 secrets/
 *.pem
 *.key
+.mcp.json

 # Node
 node_modules/
control-pipeline/Dockerfile — new file (19 lines)

@@ -0,0 +1,19 @@
FROM python:3.11-slim

WORKDIR /app

RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 8098

HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
    CMD curl -f http://127.0.0.1:8098/health || exit 1

CMD ["python", "main.py"]
control-pipeline/api/__init__.py — new file (8 lines)

@@ -0,0 +1,8 @@
from fastapi import APIRouter

from api.control_generator_routes import router as generator_router
from api.canonical_control_routes import router as canonical_router

router = APIRouter()
router.include_router(generator_router)
router.include_router(canonical_router)
control-pipeline/api/canonical_control_routes.py — new file (2132 lines; diff suppressed because it is too large)

control-pipeline/api/control_generator_routes.py — new file (1102 lines; diff suppressed because it is too large)
control-pipeline/config.py — new file (67 lines)

@@ -0,0 +1,67 @@
@@ -0,0 +1,67 @@
|
||||
import os
|
||||
|
||||
|
||||
class Settings:
|
||||
"""Environment-based configuration for control-pipeline."""
|
||||
|
||||
# Database (compliance schema)
|
||||
DATABASE_URL: str = os.getenv(
|
||||
"DATABASE_URL",
|
||||
"postgresql://breakpilot:breakpilot123@localhost:5432/breakpilot_db",
|
||||
)
|
||||
SCHEMA_SEARCH_PATH: str = os.getenv(
|
||||
"SCHEMA_SEARCH_PATH", "compliance,core,public"
|
||||
)
|
||||
|
||||
# Qdrant (vector search for dedup)
|
||||
QDRANT_URL: str = os.getenv("QDRANT_URL", "http://localhost:6333")
|
||||
QDRANT_API_KEY: str = os.getenv("QDRANT_API_KEY", "")
|
||||
|
||||
# Embedding Service
|
||||
EMBEDDING_SERVICE_URL: str = os.getenv(
|
||||
"EMBEDDING_SERVICE_URL", "http://embedding-service:8087"
|
||||
)
|
||||
|
||||
# LLM - Anthropic
|
||||
ANTHROPIC_API_KEY: str = os.getenv("ANTHROPIC_API_KEY", "")
|
||||
CONTROL_GEN_ANTHROPIC_MODEL: str = os.getenv(
|
||||
"CONTROL_GEN_ANTHROPIC_MODEL", "claude-sonnet-4-6"
|
||||
)
|
||||
DECOMPOSITION_LLM_MODEL: str = os.getenv(
|
||||
"DECOMPOSITION_LLM_MODEL", "claude-haiku-4-5-20251001"
|
||||
)
|
||||
CONTROL_GEN_LLM_TIMEOUT: int = int(
|
||||
os.getenv("CONTROL_GEN_LLM_TIMEOUT", "180")
|
||||
)
|
||||
|
||||
# LLM - Ollama (fallback)
|
||||
OLLAMA_URL: str = os.getenv(
|
||||
"OLLAMA_URL", "http://host.docker.internal:11434"
|
||||
)
|
||||
CONTROL_GEN_OLLAMA_MODEL: str = os.getenv(
|
||||
"CONTROL_GEN_OLLAMA_MODEL", "qwen3.5:35b-a3b"
|
||||
)
|
||||
|
||||
# SDK Service (for RAG search proxy)
|
||||
SDK_URL: str = os.getenv(
|
||||
"SDK_URL", "http://ai-compliance-sdk:8090"
|
||||
)
|
||||
|
||||
# Auth
|
||||
JWT_SECRET: str = os.getenv("JWT_SECRET", "")
|
||||
|
||||
# Server
|
||||
PORT: int = int(os.getenv("PORT", "8098"))
|
||||
LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
|
||||
ENVIRONMENT: str = os.getenv("ENVIRONMENT", "development")
|
||||
|
||||
# Pipeline
|
||||
DECOMPOSITION_BATCH_SIZE: int = int(
|
||||
os.getenv("DECOMPOSITION_BATCH_SIZE", "5")
|
||||
)
|
||||
DECOMPOSITION_LLM_TIMEOUT: int = int(
|
||||
os.getenv("DECOMPOSITION_LLM_TIMEOUT", "120")
|
||||
)
|
||||
|
||||
|
||||
settings = Settings()
|
||||
control-pipeline/data/__init__.py — new file (empty)

control-pipeline/data/source_type_classification.py — new file (205 lines)

@@ -0,0 +1,205 @@
@@ -0,0 +1,205 @@
|
||||
"""
|
||||
Source-Type-Klassifikation fuer Regulierungen und Frameworks.
|
||||
|
||||
Dreistufiges Modell der normativen Verbindlichkeit:
|
||||
|
||||
Stufe 1 — GESETZ (law):
|
||||
Rechtlich bindend. Bussgeld bei Verstoss.
|
||||
Beispiele: DSGVO, NIS2, AI Act, CRA
|
||||
|
||||
Stufe 2 — LEITLINIE (guideline):
|
||||
Offizielle Auslegungshilfe von Aufsichtsbehoerden.
|
||||
Beweislastumkehr: Wer abweicht, muss begruenden warum.
|
||||
Beispiele: EDPB-Leitlinien, BSI-Standards, WP29-Dokumente
|
||||
|
||||
Stufe 3 — FRAMEWORK (framework):
|
||||
Freiwillige Best Practices, nicht rechtsverbindlich.
|
||||
Aber: Koennen als "Stand der Technik" herangezogen werden.
|
||||
Beispiele: ENISA, NIST, OWASP, OECD, CISA
|
||||
|
||||
Mapping: source_regulation (aus control_parent_links) -> source_type
|
||||
"""
|
||||
|
||||
# --- Typ-Definitionen ---
|
||||
SOURCE_TYPE_LAW = "law" # Gesetz/Verordnung/Richtlinie — normative_strength bleibt
|
||||
SOURCE_TYPE_GUIDELINE = "guideline" # Leitlinie/Standard — max "should"
|
||||
SOURCE_TYPE_FRAMEWORK = "framework" # Framework/Best Practice — max "may"
|
||||
|
||||
# Max erlaubte normative_strength pro source_type
|
||||
# DB-Constraint erlaubt: must, should, may (NICHT "can")
|
||||
NORMATIVE_STRENGTH_CAP: dict[str, str] = {
|
||||
SOURCE_TYPE_LAW: "must", # keine Begrenzung
|
||||
SOURCE_TYPE_GUIDELINE: "should", # max "should"
|
||||
SOURCE_TYPE_FRAMEWORK: "may", # max "may" (= "kann")
|
||||
}
|
||||
|
||||
# Reihenfolge fuer Vergleiche (hoeher = staerker)
|
||||
STRENGTH_ORDER: dict[str, int] = {
|
||||
"may": 1, # KANN (DB-Wert)
|
||||
"can": 1, # Alias — wird in cap_normative_strength zu "may" normalisiert
|
||||
"should": 2,
|
||||
"must": 3,
|
||||
}
|
||||
|
||||
|
||||
def cap_normative_strength(original: str, source_type: str) -> str:
|
||||
"""
|
||||
Begrenzt die normative_strength basierend auf dem source_type.
|
||||
|
||||
Beispiel:
|
||||
cap_normative_strength("must", "framework") -> "may"
|
||||
cap_normative_strength("should", "law") -> "should"
|
||||
cap_normative_strength("must", "guideline") -> "should"
|
||||
"""
|
||||
cap = NORMATIVE_STRENGTH_CAP.get(source_type, "must")
|
||||
cap_level = STRENGTH_ORDER.get(cap, 3)
|
||||
original_level = STRENGTH_ORDER.get(original, 3)
|
||||
if original_level > cap_level:
|
||||
return cap
|
||||
return original
|
||||
|
||||
|
||||
def get_highest_source_type(source_types: list[str]) -> str:
|
||||
"""
|
||||
Bestimmt den hoechsten source_type aus einer Liste.
|
||||
Ein Gesetz uebertrumpft alles.
|
||||
|
||||
Beispiel:
|
||||
get_highest_source_type(["framework", "law"]) -> "law"
|
||||
get_highest_source_type(["framework", "guideline"]) -> "guideline"
|
||||
"""
|
||||
type_order = {SOURCE_TYPE_FRAMEWORK: 1, SOURCE_TYPE_GUIDELINE: 2, SOURCE_TYPE_LAW: 3}
|
||||
if not source_types:
|
||||
return SOURCE_TYPE_FRAMEWORK
|
||||
return max(source_types, key=lambda t: type_order.get(t, 0))
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Klassifikation: source_regulation -> source_type
|
||||
#
|
||||
# Diese Map wird fuer den Backfill und zukuenftige Pipeline-Runs verwendet.
|
||||
# Neue Regulierungen hier eintragen!
|
||||
# ============================================================================
|
||||
|
||||
SOURCE_REGULATION_CLASSIFICATION: dict[str, str] = {
|
||||
# --- EU-Verordnungen (unmittelbar bindend) ---
|
||||
"DSGVO (EU) 2016/679": SOURCE_TYPE_LAW,
|
||||
"KI-Verordnung (EU) 2024/1689": SOURCE_TYPE_LAW,
|
||||
"Cyber Resilience Act (CRA)": SOURCE_TYPE_LAW,
|
||||
"NIS2-Richtlinie (EU) 2022/2555": SOURCE_TYPE_LAW,
|
||||
"Data Act": SOURCE_TYPE_LAW,
|
||||
"Data Governance Act (DGA)": SOURCE_TYPE_LAW,
|
||||
"Markets in Crypto-Assets (MiCA)": SOURCE_TYPE_LAW,
|
||||
"Maschinenverordnung (EU) 2023/1230": SOURCE_TYPE_LAW,
|
||||
"Batterieverordnung (EU) 2023/1542": SOURCE_TYPE_LAW,
|
||||
"AML-Verordnung": SOURCE_TYPE_LAW,
|
||||
|
||||
# --- EU-Richtlinien (nach nationaler Umsetzung bindend) ---
|
||||
# Fuer Compliance-Zwecke wie Gesetze behandeln
|
||||
|
||||
# --- Nationale Gesetze ---
|
||||
"Bundesdatenschutzgesetz (BDSG)": SOURCE_TYPE_LAW,
|
||||
"Telekommunikationsgesetz": SOURCE_TYPE_LAW,
|
||||
"Telekommunikationsgesetz Oesterreich": SOURCE_TYPE_LAW,
|
||||
"Gewerbeordnung (GewO)": SOURCE_TYPE_LAW,
|
||||
"Handelsgesetzbuch (HGB)": SOURCE_TYPE_LAW,
|
||||
"Abgabenordnung (AO)": SOURCE_TYPE_LAW,
|
||||
"IFRS-Übernahmeverordnung": SOURCE_TYPE_LAW,
|
||||
"Österreichisches Datenschutzgesetz (DSG)": SOURCE_TYPE_LAW,
|
||||
"LOPDGDD - Ley Orgánica de Protección de Datos (Spanien)": SOURCE_TYPE_LAW,
|
||||
"Loi Informatique et Libertés (Frankreich)": SOURCE_TYPE_LAW,
|
||||
"Információs önrendelkezési jog törvény (Ungarn)": SOURCE_TYPE_LAW,
|
||||
"EU Blue Guide 2022": SOURCE_TYPE_LAW,
|
||||
|
||||
# --- EDPB/WP29 Leitlinien (offizielle Auslegungshilfe) ---
|
||||
"EDPB Leitlinien 01/2019 (Zertifizierung)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 01/2020 (Datentransfers)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 01/2020 (Vernetzte Fahrzeuge)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 01/2022 (BCR)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 01/2024 (Berechtigtes Interesse)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 04/2019 (Data Protection by Design)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 05/2020 - Einwilligung": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 07/2020 (Datentransfers)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 08/2020 (Social Media)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 09/2022 (Data Breach)": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien 09/2022 - Meldung von Datenschutzverletzungen": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Empfehlungen 01/2020 - Ergaenzende Massnahmen fuer Datentransfers": SOURCE_TYPE_GUIDELINE,
|
||||
"EDPB Leitlinien - Berechtigtes Interesse (Art. 6(1)(f))": SOURCE_TYPE_GUIDELINE,
|
||||
"WP244 Leitlinien (Profiling)": SOURCE_TYPE_GUIDELINE,
|
||||
"WP251 Leitlinien (Profiling)": SOURCE_TYPE_GUIDELINE,
|
||||
"WP260 Leitlinien (Transparenz)": SOURCE_TYPE_GUIDELINE,
|
||||
|
||||
# --- BSI Standards (behoerdliche technische Richtlinien) ---
|
||||
"BSI-TR-03161-1": SOURCE_TYPE_GUIDELINE,
|
||||
"BSI-TR-03161-2": SOURCE_TYPE_GUIDELINE,
|
||||
"BSI-TR-03161-3": SOURCE_TYPE_GUIDELINE,
|
||||
|
||||
# --- ENISA (EU-Agentur, aber Empfehlungen nicht rechtsverbindlich) ---
|
||||
"ENISA Cybersecurity State 2024": SOURCE_TYPE_FRAMEWORK,
|
||||
"ENISA ICS/SCADA Dependencies": SOURCE_TYPE_FRAMEWORK,
|
||||
"ENISA Supply Chain Good Practices": SOURCE_TYPE_FRAMEWORK,
|
||||
"ENISA Threat Landscape Supply Chain": SOURCE_TYPE_FRAMEWORK,
|
||||
|
||||
# --- NIST (US-Standards, international als Best Practice) ---
|
||||
"NIST AI Risk Management Framework": SOURCE_TYPE_FRAMEWORK,
|
||||
"NIST Cybersecurity Framework 2.0": SOURCE_TYPE_FRAMEWORK,
|
||||
"NIST SP 800-207 (Zero Trust)": SOURCE_TYPE_FRAMEWORK,
|
||||
"NIST SP 800-218 (SSDF)": SOURCE_TYPE_FRAMEWORK,
|
||||
"NIST SP 800-53 Rev. 5": SOURCE_TYPE_FRAMEWORK,
|
||||
"NIST SP 800-63-3": SOURCE_TYPE_FRAMEWORK,
|
||||
|
||||
# --- OWASP (Community-Standards) ---
|
||||
"OWASP API Security Top 10 (2023)": SOURCE_TYPE_FRAMEWORK,
|
||||
"OWASP ASVS 4.0": SOURCE_TYPE_FRAMEWORK,
|
||||
"OWASP MASVS 2.0": SOURCE_TYPE_FRAMEWORK,
|
||||
"OWASP SAMM 2.0": SOURCE_TYPE_FRAMEWORK,
|
||||
"OWASP Top 10 (2021)": SOURCE_TYPE_FRAMEWORK,
|
||||
|
||||
# --- Sonstige Frameworks ---
|
||||
"OECD KI-Empfehlung": SOURCE_TYPE_FRAMEWORK,
|
||||
"CISA Secure by Design": SOURCE_TYPE_FRAMEWORK,
|
||||
}
|
||||
|
||||
|
||||
def classify_source_regulation(source_regulation: str) -> str:
|
||||
"""
|
||||
Klassifiziert eine source_regulation als law, guideline oder framework.
|
||||
|
||||
Verwendet exaktes Matching gegen die Map. Bei unbekannten Quellen
|
||||
wird anhand von Schluesselwoertern geraten, Fallback ist 'framework'
|
||||
(konservativstes Ergebnis).
|
||||
"""
|
||||
if not source_regulation:
|
||||
return SOURCE_TYPE_FRAMEWORK
|
||||
|
||||
# Exaktes Match
|
||||
if source_regulation in SOURCE_REGULATION_CLASSIFICATION:
|
||||
return SOURCE_REGULATION_CLASSIFICATION[source_regulation]
|
||||
|
||||
# Heuristik fuer unbekannte Quellen
|
||||
lower = source_regulation.lower()
|
||||
|
||||
# Gesetze erkennen
|
||||
law_indicators = [
|
||||
"verordnung", "richtlinie", "gesetz", "directive", "regulation",
|
||||
"(eu)", "(eg)", "act", "ley", "loi", "törvény", "código",
|
||||
]
|
||||
if any(ind in lower for ind in law_indicators):
|
||||
return SOURCE_TYPE_LAW
|
||||
|
||||
# Leitlinien erkennen
|
||||
guideline_indicators = [
|
||||
"edpb", "leitlinie", "guideline", "wp2", "bsi", "empfehlung",
|
||||
]
|
||||
if any(ind in lower for ind in guideline_indicators):
|
||||
return SOURCE_TYPE_GUIDELINE
|
||||
|
||||
# Frameworks erkennen
|
||||
framework_indicators = [
|
||||
"enisa", "nist", "owasp", "oecd", "cisa", "framework", "iso",
|
||||
]
|
||||
if any(ind in lower for ind in framework_indicators):
|
||||
return SOURCE_TYPE_FRAMEWORK
|
||||
|
||||
# Konservativ: unbekannt = framework (geringste Verbindlichkeit)
|
||||
return SOURCE_TYPE_FRAMEWORK
|
||||
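A quick sketch of how the classifier behaves, using one real map key plus two invented names that exercise the keyword heuristic and the fallback:

# Exact map hit, keyword-based guess, and the conservative fallback.
assert classify_source_regulation("Gewerbeordnung (GewO)") == SOURCE_TYPE_LAW
assert classify_source_regulation("Beispiel-Datengesetz 2025") == SOURCE_TYPE_LAW        # contains "gesetz"
assert classify_source_regulation("Internal Security Handbook") == SOURCE_TYPE_FRAMEWORK  # unknown -> fallback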
0
control-pipeline/db/__init__.py
Normal file
37
control-pipeline/db/session.py
Normal file
@@ -0,0 +1,37 @@
"""Database session factory for control-pipeline.

Connects to the shared PostgreSQL with search_path set to the compliance schema.
"""

from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker

from config import settings

engine = create_engine(
    settings.DATABASE_URL,
    pool_pre_ping=True,
    pool_size=5,
    max_overflow=10,
    echo=False,
)


@event.listens_for(engine, "connect")
def set_search_path(dbapi_connection, connection_record):
    cursor = dbapi_connection.cursor()
    cursor.execute(f"SET search_path TO {settings.SCHEMA_SEARCH_PATH}")
    cursor.close()
    dbapi_connection.commit()


SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


def get_db():
    """FastAPI dependency for DB sessions."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
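A minimal usage sketch (assuming the schema named in settings.SCHEMA_SEARCH_PATH exists): every pooled connection fires the "connect" listener, so sessions resolve unqualified table names against the compliance schema.

from sqlalchemy import text

from db.session import SessionLocal

# The listener has already run for this connection, so SHOW search_path
# reflects settings.SCHEMA_SEARCH_PATH rather than the server default.
with SessionLocal() as db:
    print(db.execute(text("SHOW search_path")).scalar())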
88
control-pipeline/main.py
Normal file
@@ -0,0 +1,88 @@
import logging
from contextlib import asynccontextmanager

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy import text

from config import settings
from db.session import engine

logging.basicConfig(
    level=getattr(logging, settings.LOG_LEVEL, logging.INFO),
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
logger = logging.getLogger("control-pipeline")


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup: verify DB connectivity."""
    logger.info("Control-Pipeline starting up ...")

    # Verify database connection
    try:
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
        logger.info("Database connection OK")
    except Exception as exc:
        logger.error("Database connection failed: %s", exc)

    yield

    logger.info("Control-Pipeline shutting down ...")


app = FastAPI(
    title="BreakPilot Control Pipeline",
    description="Control generation, decomposition, and deduplication pipeline for the BreakPilot compliance platform.",
    version="1.0.0",
    lifespan=lifespan,
)

# CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Routers
from api import router as api_router  # noqa: E402

app.include_router(api_router)


# Health
@app.get("/health")
async def health():
    """Liveness probe."""
    db_ok = False
    try:
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
        db_ok = True
    except Exception:
        pass

    status = "healthy" if db_ok else "degraded"
    return {
        "status": status,
        "service": "control-pipeline",
        "version": "1.0.0",
        "dependencies": {
            "postgres": "ok" if db_ok else "unavailable",
        },
    }


if __name__ == "__main__":
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=settings.PORT,
        reload=False,
        log_level="info",
    )
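A hedged smoke test against the liveness probe (the port is whatever settings.PORT resolves to; 8000 here is an assumption):

import httpx

# "healthy" means Postgres answered SELECT 1; "degraded" means the app
# is up but the database dependency is not reachable.
body = httpx.get("http://localhost:8000/health", timeout=5.0).json()
print(body["status"], body["dependencies"]["postgres"])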
22
control-pipeline/requirements.txt
Normal file
@@ -0,0 +1,22 @@
# Web Framework
fastapi>=0.123.0
uvicorn[standard]>=0.27.0

# Database
SQLAlchemy>=2.0.36
psycopg2-binary>=2.9.10

# HTTP Client
httpx>=0.28.0

# Validation
pydantic>=2.5.0

# AI - Anthropic Claude
anthropic>=0.75.0

# Vector DB (dedup)
qdrant-client>=1.7.0

# Auth
python-jose[cryptography]>=3.3.0
219
control-pipeline/scripts/import_backup.py
Normal file
@@ -0,0 +1,219 @@
"""
Import compliance backup into local PostgreSQL.
Fixes Python-style lists/dicts in JSONB fields to valid JSON.
"""
import ast
import gzip
import json
import re
import sys
from typing import Optional

import psycopg2

DB_URL = "postgresql://breakpilot:breakpilot123@localhost:5432/breakpilot_db"
BACKUP_PATH = "/tmp/compliance-db-2026-03-28_16-25-19.sql.gz"

# Tables with JSONB columns that need Python→JSON conversion
JSONB_TABLES = {
    "canonical_controls",
    "canonical_controls_pre_dedup",
    "obligation_candidates",
    "control_dedup_reviews",
    "canonical_generation_jobs",
    "canonical_processed_chunks",
}


def fix_python_value(val: str) -> Optional[str]:
    """Convert a Python repr to a JSON string for JSONB fields."""
    if val == "NULL":
        return None
    # Strip outer SQL quotes
    if val.startswith("'") and val.endswith("'"):
        # Unescape SQL single quotes
        inner = val[1:-1].replace("''", "'")
    else:
        return val

    # Try to parse as a Python literal and convert to JSON
    try:
        obj = ast.literal_eval(inner)
        return json.dumps(obj, ensure_ascii=False)
    except (ValueError, SyntaxError):
        # Already valid JSON or a plain string
        return inner


def process_line(line: str, conn) -> bool:
    """Process a single SQL line. Returns True if it was an INSERT that executed successfully."""
    line = line.strip()
    if not line.startswith("INSERT INTO"):
        return False

    table_match = re.match(r'INSERT INTO "(\w+)"', line)
    if not table_match:
        return False
    table = table_match.group(1)

    if table not in JSONB_TABLES:
        # Execute directly for non-JSONB tables
        try:
            with conn.cursor() as cur:
                cur.execute(line)
            return True
        except Exception:
            conn.rollback()
            return False

    # For JSONB tables: use a psycopg2 parameterized query.
    # Extract column names and values.
    cols_match = re.match(r'INSERT INTO "\w+" \(([^)]+)\) VALUES \(', line)
    if not cols_match:
        return False

    col_names = [c.strip().strip('"') for c in cols_match.group(1).split(",")]

    # Extract VALUES portion
    vals_start = line.index("VALUES (") + 8
    vals_str = line[vals_start:-2]  # Remove trailing );

    # Parse SQL values (handling nested quotes and parentheses)
    values = []
    current = ""
    in_quote = False
    depth = 0
    i = 0
    while i < len(vals_str):
        c = vals_str[i]
        if in_quote:
            if c == "'" and i + 1 < len(vals_str) and vals_str[i + 1] == "'":
                current += "''"
                i += 2
                continue
            elif c == "'":
                current += "'"
                in_quote = False
            else:
                current += c
        else:
            if c == "'":
                current += "'"
                in_quote = True
            elif c == "(":
                depth += 1
                current += c
            elif c == ")":
                depth -= 1
                current += c
            elif c == "," and depth == 0:
                values.append(current.strip())
                current = ""
            else:
                current += c
        i += 1
    values.append(current.strip())

    if len(values) != len(col_names):
        # Fallback: try direct execution
        try:
            with conn.cursor() as cur:
                cur.execute(line)
            return True
        except Exception:
            conn.rollback()
            return False

    # Convert values
    params = []
    placeholders = []
    for col, val in zip(col_names, values):
        if val == "NULL":
            params.append(None)
            placeholders.append("%s")
        elif val in ("TRUE", "true"):
            params.append(True)
            placeholders.append("%s")
        elif val in ("FALSE", "false"):
            params.append(False)
            placeholders.append("%s")
        elif val.startswith("'") and val.endswith("'"):
            inner = val[1:-1].replace("''", "'")
            # Check if this looks like a Python literal (list/dict)
            stripped = inner.strip()
            if stripped and stripped[0] in ("[", "{") and stripped not in ("[]", "{}"):
                try:
                    obj = ast.literal_eval(inner)
                    params.append(json.dumps(obj, ensure_ascii=False))
                except (ValueError, SyntaxError):
                    params.append(inner)
            else:
                params.append(inner)
            placeholders.append("%s")
        else:
            # Numeric or other
            try:
                if "." in val:
                    params.append(float(val))
                else:
                    params.append(int(val))
            except ValueError:
                params.append(val)
            placeholders.append("%s")

    col_list = ", ".join(f'"{c}"' for c in col_names)
    ph_list = ", ".join(placeholders)
    sql = f'INSERT INTO "{table}" ({col_list}) VALUES ({ph_list})'

    try:
        with conn.cursor() as cur:
            cur.execute(sql, params)
        return True
    except Exception as e:
        conn.rollback()
        if "duplicate key" not in str(e):
            print(f"  ERROR [{table}]: {str(e)[:120]}", file=sys.stderr)
        return False


def main():
    conn = psycopg2.connect(DB_URL)
    conn.autocommit = True

    with conn.cursor() as cur:
        cur.execute("SET search_path TO compliance, public")

    total = 0
    ok = 0
    errors = 0

    print(f"Reading {BACKUP_PATH}...")
    with gzip.open(BACKUP_PATH, "rt", encoding="utf-8") as f:
        buffer = ""
        for line in f:
            buffer += line
            if not buffer.rstrip().endswith(";"):
                continue
            # Complete SQL statement
            stmt = buffer.strip()
            buffer = ""

            if not stmt.startswith("INSERT"):
                continue

            total += 1
            if process_line(stmt, conn):
                ok += 1
            else:
                errors += 1

            if total % 10000 == 0:
                print(f"  {total:>8} processed, {ok} ok, {errors} errors")

    print(f"\nDONE: {total} total, {ok} ok, {errors} errors")
    conn.close()


if __name__ == "__main__":
    main()
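The crux of the importer is turning Python reprs that ended up in JSONB columns back into valid JSON. A standalone sketch of that conversion step (the sample value is invented):

import ast
import json

# As dumped in the backup: a Python literal, which PostgreSQL's JSONB would reject.
raw = "{'tags': ['iam', 'mfa'], 'count': 2}"
obj = ast.literal_eval(raw)                  # safe parse of the Python literal
print(json.dumps(obj, ensure_ascii=False))   # {"tags": ["iam", "mfa"], "count": 2}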
284
control-pipeline/scripts/ingest_bag_urteile.py
Normal file
@@ -0,0 +1,284 @@
"""Ingest BAG (Bundesarbeitsgericht) court decisions into RAG.

Downloads decision pages from bundesarbeitsgericht.de and uploads them to
the bp_compliance_datenschutz Qdrant collection via the RAG-Service API.

These decisions are curated for relevance to IT/AI co-determination
(§87 BetrVG).

Usage:
    python scripts/ingest_bag_urteile.py [--rag-url https://macmini:8097] [--dry-run]
"""

import argparse
import json
import os
import re
import sys
import tempfile
import time

import httpx

# ---------------------------------------------------------------------------
# Curated BAG decisions for IT/AI works council co-determination
# ---------------------------------------------------------------------------

BAG_DECISIONS = [
    # --- M365 / Copilot / standard software ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-20-21/",
        "case_number": "1 ABR 20/21",
        "date": "2022-03-08",
        "subject": "Microsoft Office 365 — Mitbestimmung",
        "keywords": ["Microsoft 365", "Standardsoftware", "Ueberwachung", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abn-36-18/",
        "case_number": "1 ABN 36/18",
        "date": "2018-10-23",
        "subject": "Excel / Standardsoftware — keine Geringfuegigkeitsschwelle",
        "keywords": ["Excel", "Standardsoftware", "Geringfuegigkeit", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-45-11/",
        "case_number": "1 ABR 45/11",
        "date": "2012-09-25",
        "subject": "SAP ERP im Personalwesen",
        "keywords": ["SAP", "ERP", "Personalwesen", "Verhaltenskontrolle", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-31-19/",
        "case_number": "1 ABR 31/19",
        "date": "2021-01-27",
        "subject": "E-Mail-Kommunikationssoftware — Mitbestimmung",
        "keywords": ["E-Mail", "Kommunikation", "Software", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-13-17/",
        "case_number": "1 ABR 13/17",
        "date": "2019-07-09",
        "subject": "IT-System fuer Mitarbeiterbefragung",
        "keywords": ["Mitarbeiterbefragung", "Feedback", "technische Einrichtung", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-16-23/",
        "case_number": "1 ABR 16/23",
        "date": "2024-07-16",
        "subject": "Headset-System — Geraetenutzungsdaten",
        "keywords": ["Headset", "Geraetenutzung", "Ueberwachung", "§87 BetrVG"],
    },
    # --- Monitoring, social media, third-party platforms ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-7-15/",
        "case_number": "1 ABR 7/15",
        "date": "2016-12-13",
        "subject": "Facebook-Seite — indirekte Leistungsueberwachung",
        "keywords": ["Facebook", "Social Media", "Besucherbeitraege", "Ueberwachung", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-43-12/",
        "case_number": "1 ABR 43/12",
        "date": "2013-12-10",
        "subject": "Google Maps — indirekte Ueberwachung / Definition Ueberwachung",
        "keywords": ["Google Maps", "Routenplaner", "indirekte Ueberwachung", "Definition", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-68-13/",
        "case_number": "1 ABR 68/13",
        "date": "2015-07-21",
        "subject": "Ueberwachung durch technische Einrichtung eines Dritten (SaaS/Cloud)",
        "keywords": ["Drittsystem", "SaaS", "Cloud", "Ueberwachung", "§87 BetrVG"],
    },
    # --- Video, workload, performance metrics ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-78-11/",
        "case_number": "1 ABR 78/11",
        "date": "2012-12-11",
        "subject": "Videoueberwachung — Grundsatzentscheidung",
        "keywords": ["Videoueberwachung", "Kamera", "Arbeitsplatz", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-46-15/",
        "case_number": "1 ABR 46/15",
        "date": "2017-04-25",
        "subject": "Belastungsstatistik — dauerhafte Kennzahlenueberwachung",
        "keywords": ["Belastungsstatistik", "Kennzahlen", "Analytics", "Persoenlichkeitsrecht", "§87 BetrVG"],
    },
    # --- Negative / distinguishing cases ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-32-16/",
        "case_number": "1 ABR 32/16",
        "date": "2017-12-19",
        "subject": "Anti-Terror-Listen — keine Mitbestimmung",
        "keywords": ["Anti-Terror", "Sanktionsliste", "keine Mitbestimmung", "Abgrenzung", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-22-21/",
        "case_number": "1 ABR 22/21",
        "date": "2022-09-13",
        "subject": "Elektronische Arbeitszeiterfassung — Initiativrecht",
        "keywords": ["Arbeitszeiterfassung", "Initiativrecht", "digitale Systeme", "§87 BetrVG"],
    },
    # --- Historic landmark decisions ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-43-81/",
        "case_number": "1 ABR 43/81",
        "date": "1983-12-06",
        "subject": "Grundsatz technische Ueberwachung — Eignung genuegt",
        "keywords": ["Grundsatz", "Eignung", "technische Einrichtung", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-23-82/",
        "case_number": "1 ABR 23/82",
        "date": "1984-09-14",
        "subject": "Erste Grundlinie IT-Systeme",
        "keywords": ["IT-System", "Grundlinie", "technische Einrichtung", "§87 BetrVG"],
    },
    # --- E-mail / internet ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-46-10/",
        "case_number": "1 ABR 46/10",
        "date": "2012-02-07",
        "subject": "Internet- und E-Mail-Nutzung — Kommunikationsdaten",
        "keywords": ["Internet", "E-Mail", "Kommunikationsdaten", "Auswertung", "§87 BetrVG"],
    },
    # --- HR / evaluation systems ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-40-07/",
        "case_number": "1 ABR 40/07",
        "date": "2008-07-22",
        "subject": "Beurteilungssysteme — §94/§95 BetrVG",
        "keywords": ["Beurteilung", "Bewertungssystem", "HR", "§94 BetrVG", "§95 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-16-07/",
        "case_number": "1 ABR 16/07",
        "date": "2008-03-18",
        "subject": "Personalfrageboegen — Bewertung",
        "keywords": ["Personalfragebogen", "Bewertung", "HR-Tools", "§94 BetrVG"],
    },
    # --- Video / physical monitoring ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-21-03/",
        "case_number": "1 ABR 21/03",
        "date": "2004-06-29",
        "subject": "Videoueberwachung Arbeitsplatz",
        "keywords": ["Video", "Kamera", "Arbeitsplatz", "Ueberwachung", "§87 BetrVG"],
    },
    # --- Competence (which works council is responsible) ---
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-2-05/",
        "case_number": "1 ABR 2/05",
        "date": "2006-05-03",
        "subject": "Zustaendigkeit Betriebsrat bei konzernweiten Tools",
        "keywords": ["Zustaendigkeit", "Konzern", "Gesamtbetriebsrat", "§87 BetrVG"],
    },
    {
        "url": "https://www.bundesarbeitsgericht.de/entscheidung/1-abr-58-04/",
        "case_number": "1 ABR 58/04",
        "date": "2006-03-28",
        "subject": "Mitbestimmung bei Einfuehrung technischer Systeme",
        "keywords": ["Systemeinführung", "technische Systeme", "Mitbestimmung", "§87 BetrVG"],
    },
]


def normalize_case_number(case_number: str) -> str:
    """Normalize a case number for use as regulation_id."""
    return re.sub(r"[^a-z0-9]", "_", case_number.lower()).strip("_")


def download_decision(url: str, client: httpx.Client) -> bytes:
    """Download a BAG decision page as HTML."""
    resp = client.get(url, follow_redirects=True)
    resp.raise_for_status()
    return resp.content


def upload_to_rag(
    file_bytes: bytes,
    filename: str,
    metadata: dict,
    rag_url: str,
    client: httpx.Client,
) -> dict:
    """Upload a document to the RAG service."""
    files = {"file": (filename, file_bytes, "text/html")}
    data = {
        "collection": "bp_compliance_datenschutz",
        "data_type": "compliance_datenschutz",
        "bundesland": "bund",
        "use_case": "court_decision",
        "year": metadata.get("date", "2024")[:4],
        "chunk_strategy": "legal",
        "chunk_size": "512",
        "chunk_overlap": "50",
        "metadata_json": json.dumps(metadata),
    }
    resp = client.post(f"{rag_url}/api/v1/documents/upload", files=files, data=data)
    resp.raise_for_status()
    return resp.json()


def main():
    parser = argparse.ArgumentParser(description="Ingest BAG court decisions into RAG")
    parser.add_argument("--rag-url", default="https://macmini:8097", help="RAG service URL")
    parser.add_argument("--dry-run", action="store_true", help="Download only, don't upload")
    args = parser.parse_args()

    client = httpx.Client(timeout=60, verify=False)
    stats = {"downloaded": 0, "uploaded": 0, "errors": 0}

    for decision in BAG_DECISIONS:
        case_id = normalize_case_number(decision["case_number"])
        print(f"\n--- {decision['case_number']}: {decision['subject']} ---")

        # Download
        try:
            html_bytes = download_decision(decision["url"], client)
            stats["downloaded"] += 1
            print(f"  Downloaded: {len(html_bytes)} bytes")
        except Exception as e:
            print(f"  ERROR downloading: {e}")
            stats["errors"] += 1
            continue

        if args.dry_run:
            continue

        # Upload
        metadata = {
            "regulation_id": f"bag_{case_id}",
            "regulation_name_de": f"BAG {decision['case_number']} — {decision['subject']}",
            "category": "arbeitsrecht",
            "source": "bundesarbeitsgericht.de",
            "doc_type": "court_decision",
            "license": "public_domain_§5_UrhG",
            "court": "BAG",
            "case_number": decision["case_number"],
            "date": decision["date"],
            "subject_matter": decision["subject"],
            "keywords": decision["keywords"],
        }

        try:
            result = upload_to_rag(
                html_bytes,
                f"bag_{case_id}.html",
                metadata,
                args.rag_url,
                client,
            )
            stats["uploaded"] += 1
            print(f"  Uploaded: {result.get('chunks_count', '?')} chunks, doc_id={result.get('document_id', '?')}")
        except Exception as e:
            print(f"  ERROR uploading: {e}")
            stats["errors"] += 1

        time.sleep(1)  # Rate limiting

    print(f"\n=== Done: {stats['downloaded']} downloaded, {stats['uploaded']} uploaded, {stats['errors']} errors ===")


if __name__ == "__main__":
    main()
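Two quick checks before a real run (the command line simply mirrors the usage note in the module docstring):

# Case numbers become stable regulation_id suffixes:
print(normalize_case_number("1 ABR 20/21"))   # -> 1_abr_20_21

# --dry-run downloads every curated decision without uploading, which is a
# cheap way to verify that the URLs still resolve before touching Qdrant:
#   python scripts/ingest_bag_urteile.py --dry-run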
0
control-pipeline/services/__init__.py
Normal file
187
control-pipeline/services/anchor_finder.py
Normal file
@@ -0,0 +1,187 @@
"""
Anchor Finder — finds open-source references (OWASP, NIST, ENISA) for controls.

Two-stage search:
  Stage A: RAG-internal search for open-source chunks matching the control topic
  Stage B: Web search via DuckDuckGo Instant Answer API (no API key needed)

Only open-source references (Rule 1+2) are accepted as anchors.
"""

import logging
from dataclasses import dataclass
from typing import List, Optional

import httpx

from .rag_client import ComplianceRAGClient, get_rag_client
from .control_generator import (
    GeneratedControl,
    REGULATION_LICENSE_MAP,
    _RULE2_PREFIXES,
    _RULE3_PREFIXES,
    _classify_regulation,
)

logger = logging.getLogger(__name__)

# Regulation codes that are safe to reference as open anchors (Rule 1+2)
_OPEN_SOURCE_RULES = {1, 2}


@dataclass
class OpenAnchor:
    framework: str
    ref: str
    url: str


class AnchorFinder:
    """Finds open-source references to anchor generated controls."""

    def __init__(self, rag_client: Optional[ComplianceRAGClient] = None):
        self.rag = rag_client or get_rag_client()

    async def find_anchors(
        self,
        control: GeneratedControl,
        skip_web: bool = False,
        min_anchors: int = 2,
    ) -> List[OpenAnchor]:
        """Find open-source anchors for a control."""
        # Stage A: RAG-internal search
        anchors = await self._search_rag_for_open_anchors(control)

        # Stage B: Web search if not enough anchors
        if len(anchors) < min_anchors and not skip_web:
            web_anchors = await self._search_web(control)
            # Deduplicate by framework+ref
            existing_keys = {(a.framework, a.ref) for a in anchors}
            for wa in web_anchors:
                if (wa.framework, wa.ref) not in existing_keys:
                    anchors.append(wa)

        return anchors

    async def _search_rag_for_open_anchors(self, control: GeneratedControl) -> List[OpenAnchor]:
        """Search RAG for chunks from open sources matching the control topic."""
        # Build search query from control title + first 3 tags
        tags_str = " ".join(control.tags[:3]) if control.tags else ""
        query = f"{control.title} {tags_str}".strip()

        results = await self.rag.search_with_rerank(
            query=query,
            collection="bp_compliance_ce",
            top_k=15,
        )

        anchors: List[OpenAnchor] = []
        seen: set[str] = set()

        for r in results:
            if not r.regulation_code:
                continue

            # Only accept open-source references
            license_info = _classify_regulation(r.regulation_code)
            if license_info.get("rule") not in _OPEN_SOURCE_RULES:
                continue

            # Build reference key for dedup
            ref = r.article or r.category or ""
            key = f"{r.regulation_code}:{ref}"
            if key in seen:
                continue
            seen.add(key)

            framework_name = license_info.get("name", r.regulation_name or r.regulation_short or r.regulation_code)
            url = r.source_url or self._build_reference_url(r.regulation_code, ref)

            anchors.append(OpenAnchor(
                framework=framework_name,
                ref=ref,
                url=url,
            ))

            if len(anchors) >= 5:
                break

        return anchors

    async def _search_web(self, control: GeneratedControl) -> List[OpenAnchor]:
        """Search the DuckDuckGo Instant Answer API for open references."""
        keywords = f"{control.title} security control OWASP NIST"
        try:
            async with httpx.AsyncClient(timeout=10.0) as client:
                resp = await client.get(
                    "https://api.duckduckgo.com/",
                    params={
                        "q": keywords,
                        "format": "json",
                        "no_html": "1",
                        "skip_disambig": "1",
                    },
                )
                if resp.status_code != 200:
                    return []

                data = resp.json()
                anchors: List[OpenAnchor] = []

                # Parse RelatedTopics
                for topic in data.get("RelatedTopics", [])[:10]:
                    url = topic.get("FirstURL", "")
                    text = topic.get("Text", "")

                    if not url:
                        continue

                    # Only accept known open-source domains
                    framework = self._identify_framework_from_url(url)
                    if framework:
                        anchors.append(OpenAnchor(
                            framework=framework,
                            ref=text[:100] if text else url,
                            url=url,
                        ))

                    if len(anchors) >= 3:
                        break

                return anchors

        except Exception as e:
            logger.warning("Web anchor search failed: %s", e)
            return []

    @staticmethod
    def _identify_framework_from_url(url: str) -> Optional[str]:
        """Identify if a URL belongs to a known open-source framework."""
        url_lower = url.lower()
        if "owasp.org" in url_lower:
            return "OWASP"
        if "nist.gov" in url_lower or "csrc.nist.gov" in url_lower:
            return "NIST"
        if "enisa.europa.eu" in url_lower:
            return "ENISA"
        if "cisa.gov" in url_lower:
            return "CISA"
        if "eur-lex.europa.eu" in url_lower:
            return "EU Law"
        return None

    @staticmethod
    def _build_reference_url(regulation_code: str, ref: str) -> str:
        """Build a reference URL for known frameworks."""
        code = regulation_code.lower()
        if code.startswith("owasp"):
            return "https://owasp.org/www-project-application-security-verification-standard/"
        if code.startswith("nist"):
            return "https://csrc.nist.gov/publications"
        if code.startswith("enisa"):
            return "https://www.enisa.europa.eu/publications"
        if code.startswith("eu_"):
            return "https://eur-lex.europa.eu/"
        if code == "cisa_secure_by_design":
            return "https://www.cisa.gov/securebydesign"
        return ""
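A hedged usage sketch of the two-stage lookup; how a GeneratedControl is constructed is defined in control_generator and only assumed here:

import asyncio

async def demo(control):  # control: a GeneratedControl from the generator
    finder = AnchorFinder()
    # Stage A only (RAG); pass skip_web=False to allow the DuckDuckGo fallback.
    anchors = await finder.find_anchors(control, skip_web=True)
    for a in anchors:
        print(f"{a.framework}: {a.ref} -> {a.url}")

# asyncio.run(demo(some_control))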
245
control-pipeline/services/applicability_engine.py
Normal file
@@ -0,0 +1,245 @@
"""
Applicability Engine -- filters controls based on company profile + scope answers.

Deterministic, no LLM needed. Implements Scoped Control Applicability (Phase C2).

Filtering logic:
- Controls with NULL applicability fields are INCLUDED (apply to everyone).
- Controls with '["all"]' match all queries.
- Industry: control applies if its applicable_industries contains the requested
  industry OR contains "all" OR is NULL.
- Company size: control applies if its applicable_company_size contains the
  requested size OR contains "all" OR is NULL.
- Scope signals: control applies if it has NO scope_conditions, or the company
  has at least one of the required signals (requires_any logic).
"""

from __future__ import annotations

import json
import logging
from typing import Any, Optional

from sqlalchemy import text

from db.session import SessionLocal

logger = logging.getLogger(__name__)

# Valid company sizes (ordered smallest to largest)
VALID_SIZES = ("micro", "small", "medium", "large", "enterprise")


def _parse_json_text(value: Any) -> Any:
    """Parse a TEXT column that stores JSON. Returns None if unparseable."""
    if value is None:
        return None
    if isinstance(value, (list, dict)):
        return value
    if isinstance(value, str):
        try:
            return json.loads(value)
        except (json.JSONDecodeError, ValueError):
            return None
    return None


def _matches_industry(applicable_industries_raw: Any, industry: str) -> bool:
    """Check if a control's applicable_industries matches the requested industry."""
    industries = _parse_json_text(applicable_industries_raw)
    if industries is None:
        return True  # NULL = applies to everyone
    if not isinstance(industries, list):
        return True  # malformed = include
    if "all" in industries:
        return True
    return industry in industries


def _matches_company_size(applicable_company_size_raw: Any, company_size: str) -> bool:
    """Check if a control's applicable_company_size matches the requested size."""
    sizes = _parse_json_text(applicable_company_size_raw)
    if sizes is None:
        return True  # NULL = applies to everyone
    if not isinstance(sizes, list):
        return True  # malformed = include
    if "all" in sizes:
        return True
    return company_size in sizes


def _matches_scope_signals(
    scope_conditions_raw: Any, scope_signals: list[str]
) -> bool:
    """Check if a control's scope_conditions are satisfied by the given signals.

    A control with scope_conditions = {"requires_any": ["uses_ai", "processes_health_data"]}
    matches if the company has at least one of those signals.
    A control with NULL or empty scope_conditions always matches.
    """
    conditions = _parse_json_text(scope_conditions_raw)
    if conditions is None:
        return True  # no conditions = applies to everyone
    if not isinstance(conditions, dict):
        return True  # malformed = include

    requires_any = conditions.get("requires_any", [])
    if not requires_any:
        return True  # no required signals = applies to everyone

    # Company must have at least one of the required signals
    return bool(set(requires_any) & set(scope_signals))


def get_applicable_controls(
    db,
    industry: Optional[str] = None,
    company_size: Optional[str] = None,
    scope_signals: Optional[list[str]] = None,
    limit: int = 100,
    offset: int = 0,
) -> dict[str, Any]:
    """
    Returns controls applicable to the given company profile.

    Uses SQL pre-filtering with LIKE for performance, then Python post-filtering
    for precise JSON matching (since columns are TEXT, not JSONB).

    Args:
        db: SQLAlchemy session
        industry: e.g. "Telekommunikation", "Energie", "Gesundheitswesen"
        company_size: e.g. "medium", "large", "enterprise"
        scope_signals: e.g. ["uses_ai", "third_country_transfer"]
        limit: max results to return (applied after filtering)
        offset: pagination offset (applied after filtering)

    Returns:
        dict with total_applicable count, paginated controls, and breakdown stats
    """
    if scope_signals is None:
        scope_signals = []

    # SQL pre-filter: broad match to reduce Python-side filtering
    query = """
        SELECT id, framework_id, control_id, title, objective, rationale,
               scope, requirements, test_procedure, evidence,
               severity, risk_score, implementation_effort,
               evidence_confidence, open_anchors, release_state, tags,
               license_rule, source_original_text, source_citation,
               customer_visible, verification_method, category, evidence_type,
               target_audience, generation_metadata, generation_strategy,
               applicable_industries, applicable_company_size, scope_conditions,
               parent_control_uuid, decomposition_method, pipeline_version,
               created_at, updated_at
        FROM canonical_controls
        WHERE release_state NOT IN ('duplicate', 'deprecated', 'rejected')
    """
    params: dict[str, Any] = {}

    # SQL-level pre-filtering (broad, may include false positives)
    if industry:
        query += """ AND (applicable_industries IS NULL
                     OR applicable_industries LIKE '%"all"%'
                     OR applicable_industries LIKE '%' || :industry || '%')"""
        params["industry"] = industry

    if company_size:
        query += """ AND (applicable_company_size IS NULL
                     OR applicable_company_size LIKE '%"all"%'
                     OR applicable_company_size LIKE '%' || :company_size || '%')"""
        params["company_size"] = company_size

    # For scope_signals we cannot do precise SQL filtering on requires_any,
    # but we can at least exclude controls whose scope_conditions text
    # does not contain any of the requested signals (if only 1 signal).
    # With multiple signals we skip the SQL pre-filter and do it in Python.
    if scope_signals and len(scope_signals) == 1:
        query += """ AND (scope_conditions IS NULL
                     OR scope_conditions LIKE '%' || :scope_sig || '%')"""
        params["scope_sig"] = scope_signals[0]

    query += " ORDER BY control_id"

    rows = db.execute(text(query), params).fetchall()

    # Python-level precise filtering
    applicable = []
    for r in rows:
        if industry and not _matches_industry(r.applicable_industries, industry):
            continue
        if company_size and not _matches_company_size(
            r.applicable_company_size, company_size
        ):
            continue
        if scope_signals and not _matches_scope_signals(
            r.scope_conditions, scope_signals
        ):
            continue
        applicable.append(r)

    total_applicable = len(applicable)

    # Apply pagination
    paginated = applicable[offset : offset + limit]

    # Build domain breakdown
    domain_counts: dict[str, int] = {}
    for r in applicable:
        domain = r.control_id.split("-")[0].upper() if r.control_id else "UNKNOWN"
        domain_counts[domain] = domain_counts.get(domain, 0) + 1

    # Build severity breakdown
    severity_counts: dict[str, int] = {}
    for r in applicable:
        sev = r.severity or "unknown"
        severity_counts[sev] = severity_counts.get(sev, 0) + 1

    # Build industry breakdown (from matched controls)
    industry_counts: dict[str, int] = {}
    for r in applicable:
        industries = _parse_json_text(r.applicable_industries)
        if isinstance(industries, list):
            for ind in industries:
                industry_counts[ind] = industry_counts.get(ind, 0) + 1
        else:
            industry_counts["unclassified"] = (
                industry_counts.get("unclassified", 0) + 1
            )

    return {
        "total_applicable": total_applicable,
        "limit": limit,
        "offset": offset,
        "controls": [_row_to_control(r) for r in paginated],
        "breakdown": {
            "by_domain": domain_counts,
            "by_severity": severity_counts,
            "by_industry": industry_counts,
        },
    }


def _row_to_control(r) -> dict[str, Any]:
    """Convert a DB row to a control dict for API response."""
    return {
        "id": str(r.id),
        "framework_id": str(r.framework_id),
        "control_id": r.control_id,
        "title": r.title,
        "objective": r.objective,
        "rationale": r.rationale,
        "severity": r.severity,
        "category": r.category,
        "verification_method": r.verification_method,
        "evidence_type": getattr(r, "evidence_type", None),
        "target_audience": r.target_audience,
        "applicable_industries": r.applicable_industries,
        "applicable_company_size": r.applicable_company_size,
        "scope_conditions": r.scope_conditions,
        "release_state": r.release_state,
        "control_id_domain": (
            r.control_id.split("-")[0].upper() if r.control_id else None
        ),
        "created_at": r.created_at.isoformat() if r.created_at else None,
        "updated_at": r.updated_at.isoformat() if r.updated_at else None,
    }
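The three matching rules are easiest to see on invented column values:

# NULL includes everyone, "all" is a wildcard, otherwise exact membership:
assert _matches_industry(None, "Energie")
assert _matches_industry('["all"]', "Energie")
assert not _matches_industry('["Telekommunikation"]', "Energie")

# requires_any: a single shared signal is enough.
cond = '{"requires_any": ["uses_ai", "processes_health_data"]}'
assert _matches_scope_signals(cond, ["uses_ai", "iso_certified"])
assert not _matches_scope_signals(cond, ["third_country_transfer"])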
631
control-pipeline/services/batch_dedup_runner.py
Normal file
@@ -0,0 +1,631 @@
"""Batch Dedup Runner — orchestrates deduplication of ~85k atomic controls.

Reduces Pass 0b controls from ~85k to ~18-25k unique Master Controls via:
  Phase 1: Intra-Group Dedup — same merge_group_hint → pick best, link rest
           (85k → ~52k, mostly title-identical short-circuit, no embeddings)
  Phase 2: Cross-Group Dedup — embed masters, search Qdrant for similar
           masters with different hints (52k → ~18-25k)

All Pass 0b controls have pattern_id=NULL. The primary grouping key is
merge_group_hint (format: "action_type:norm_obj:trigger_key"), which
encodes the normalized action, object, and trigger.

Usage:
    runner = BatchDedupRunner(db)
    stats = await runner.run(dry_run=True)   # preview
    stats = await runner.run(dry_run=False)  # execute
    stats = await runner.run(hint_filter="implement:multi_factor_auth:none")
"""

import asyncio
import json
import logging
import time
from collections import defaultdict
from typing import Optional

from sqlalchemy import text

from services.control_dedup import (
    canonicalize_text,
    ensure_qdrant_collection,
    get_embedding,
    normalize_action,
    normalize_object,
    qdrant_search_cross_regulation,
    qdrant_upsert,
    LINK_THRESHOLD,
    REVIEW_THRESHOLD,
)

logger = logging.getLogger(__name__)

DEDUP_COLLECTION = "atomic_controls_dedup"


# ── Quality Score ────────────────────────────────────────────────────────


def quality_score(control: dict) -> float:
    """Score a control by richness of requirements, tests, evidence, and objective.

    Higher score = better candidate for master control.
    """
    score = 0.0

    reqs = control.get("requirements") or "[]"
    if isinstance(reqs, str):
        try:
            reqs = json.loads(reqs)
        except (json.JSONDecodeError, TypeError):
            reqs = []
    score += len(reqs) * 2.0

    tests = control.get("test_procedure") or "[]"
    if isinstance(tests, str):
        try:
            tests = json.loads(tests)
        except (json.JSONDecodeError, TypeError):
            tests = []
    score += len(tests) * 1.5

    evidence = control.get("evidence") or "[]"
    if isinstance(evidence, str):
        try:
            evidence = json.loads(evidence)
        except (json.JSONDecodeError, TypeError):
            evidence = []
    score += len(evidence) * 1.0

    objective = control.get("objective") or ""
    score += min(len(objective) / 200, 3.0)

    return score
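A tiny worked example of the weighting (all values invented): two requirements, one test step, one evidence item, and a 100-character objective give 2 x 2.0 + 1 x 1.5 + 1 x 1.0 + 100/200 = 7.0.

sample = {
    "requirements": '["req A", "req B"]',  # 2 x 2.0 = 4.0
    "test_procedure": '["step 1"]',        # 1 x 1.5 = 1.5
    "evidence": '["screenshot"]',          # 1 x 1.0 = 1.0
    "objective": "x" * 100,                # 100 / 200 = 0.5 (capped at 3.0)
}
assert quality_score(sample) == 7.0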
|
||||
|
||||
# ── Batch Dedup Runner ───────────────────────────────────────────────────
|
||||
|
||||
|
||||
class BatchDedupRunner:
|
||||
"""Batch dedup orchestrator for existing Pass 0b atomic controls."""
|
||||
|
||||
def __init__(self, db, collection: str = DEDUP_COLLECTION):
|
||||
self.db = db
|
||||
self.collection = collection
|
||||
self.stats = {
|
||||
"total_controls": 0,
|
||||
"unique_hints": 0,
|
||||
"phase1_groups_processed": 0,
|
||||
"masters": 0,
|
||||
"linked": 0,
|
||||
"review": 0,
|
||||
"new_controls": 0,
|
||||
"parent_links_transferred": 0,
|
||||
"cross_group_linked": 0,
|
||||
"cross_group_review": 0,
|
||||
"errors": 0,
|
||||
"skipped_title_identical": 0,
|
||||
}
|
||||
self._progress_phase = ""
|
||||
self._progress_count = 0
|
||||
self._progress_total = 0
|
||||
|
||||
async def run(
|
||||
self,
|
||||
dry_run: bool = False,
|
||||
hint_filter: str = None,
|
||||
) -> dict:
|
||||
"""Run the full batch dedup pipeline.
|
||||
|
||||
Args:
|
||||
dry_run: If True, compute stats but don't modify DB/Qdrant.
|
||||
hint_filter: If set, only process groups matching this hint prefix.
|
||||
|
||||
Returns:
|
||||
Stats dict with counts.
|
||||
"""
|
||||
start = time.monotonic()
|
||||
logger.info("BatchDedup starting (dry_run=%s, hint_filter=%s)",
|
||||
dry_run, hint_filter)
|
||||
|
||||
if not dry_run:
|
||||
await ensure_qdrant_collection(collection=self.collection)
|
||||
|
||||
# Phase 1: Intra-group dedup (same merge_group_hint)
|
||||
self._progress_phase = "phase1"
|
||||
groups = self._load_merge_groups(hint_filter)
|
||||
self._progress_total = self.stats["total_controls"]
|
||||
|
||||
for hint, controls in groups:
|
||||
try:
|
||||
await self._process_hint_group(hint, controls, dry_run)
|
||||
self.stats["phase1_groups_processed"] += 1
|
||||
except Exception as e:
|
||||
logger.error("BatchDedup Phase 1 error on hint %s: %s", hint, e)
|
||||
self.stats["errors"] += 1
|
||||
try:
|
||||
self.db.rollback()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
logger.info(
|
||||
"BatchDedup Phase 1 done: %d masters, %d linked, %d review",
|
||||
self.stats["masters"], self.stats["linked"], self.stats["review"],
|
||||
)
|
||||
|
||||
# Phase 2: Cross-group dedup via embeddings
|
||||
if not dry_run:
|
||||
self._progress_phase = "phase2"
|
||||
await self._run_cross_group_pass()
|
||||
|
||||
elapsed = time.monotonic() - start
|
||||
self.stats["elapsed_seconds"] = round(elapsed, 1)
|
||||
logger.info("BatchDedup completed in %.1fs: %s", elapsed, self.stats)
|
||||
return self.stats
|
||||
|
||||
def _load_merge_groups(self, hint_filter: str = None) -> list:
|
||||
"""Load all Pass 0b controls grouped by merge_group_hint, largest first."""
|
||||
conditions = [
|
||||
"decomposition_method = 'pass0b'",
|
||||
"release_state != 'deprecated'",
|
||||
"release_state != 'duplicate'",
|
||||
]
|
||||
params = {}
|
||||
|
||||
if hint_filter:
|
||||
conditions.append("generation_metadata->>'merge_group_hint' LIKE :hf")
|
||||
params["hf"] = f"{hint_filter}%"
|
||||
|
||||
where = " AND ".join(conditions)
|
||||
rows = self.db.execute(text(f"""
|
||||
SELECT id::text, control_id, title, objective,
|
||||
pattern_id, requirements::text, test_procedure::text,
|
||||
evidence::text, release_state,
|
||||
generation_metadata->>'merge_group_hint' as merge_group_hint,
|
||||
generation_metadata->>'action_object_class' as action_object_class
|
||||
FROM canonical_controls
|
||||
WHERE {where}
|
||||
ORDER BY control_id
|
||||
"""), params).fetchall()
|
||||
|
||||
by_hint = defaultdict(list)
|
||||
for r in rows:
|
||||
by_hint[r[9] or ""].append({
|
||||
"uuid": r[0],
|
||||
"control_id": r[1],
|
||||
"title": r[2],
|
||||
"objective": r[3],
|
||||
"pattern_id": r[4],
|
||||
"requirements": r[5],
|
||||
"test_procedure": r[6],
|
||||
"evidence": r[7],
|
||||
"release_state": r[8],
|
||||
"merge_group_hint": r[9] or "",
|
||||
"action_object_class": r[10] or "",
|
||||
})
|
||||
|
||||
self.stats["total_controls"] = len(rows)
|
||||
self.stats["unique_hints"] = len(by_hint)
|
||||
|
||||
sorted_groups = sorted(by_hint.items(), key=lambda x: len(x[1]), reverse=True)
|
||||
logger.info("BatchDedup loaded %d controls in %d hint groups",
|
||||
len(rows), len(sorted_groups))
|
||||
return sorted_groups
|
||||
|
||||
def _sub_group_by_merge_hint(self, controls: list) -> dict:
|
||||
"""Group controls by merge_group_hint composite key."""
|
||||
groups = defaultdict(list)
|
||||
for c in controls:
|
||||
hint = c["merge_group_hint"]
|
||||
if hint:
|
||||
groups[hint].append(c)
|
||||
else:
|
||||
groups[f"__no_hint_{c['uuid']}"].append(c)
|
||||
return dict(groups)
|
||||
|
||||
async def _process_hint_group(
|
||||
self,
|
||||
hint: str,
|
||||
controls: list,
|
||||
dry_run: bool,
|
||||
):
|
||||
"""Process all controls sharing the same merge_group_hint.
|
||||
|
||||
Within a hint group, all controls share action+object+trigger.
|
||||
The best-quality control becomes master, rest are linked as duplicates.
|
||||
"""
|
||||
if len(controls) < 2:
|
||||
# Singleton → always master
|
||||
self.stats["masters"] += 1
|
||||
if not dry_run:
|
||||
await self._embed_and_index(controls[0])
|
||||
self._progress_count += 1
|
||||
self._log_progress(hint)
|
||||
return
|
||||
|
||||
# Sort by quality score (best first)
|
||||
sorted_group = sorted(controls, key=quality_score, reverse=True)
|
||||
master = sorted_group[0]
|
||||
self.stats["masters"] += 1
|
||||
|
||||
if not dry_run:
|
||||
await self._embed_and_index(master)
|
||||
|
||||
for candidate in sorted_group[1:]:
|
||||
# All share the same hint → check title similarity
|
||||
if candidate["title"].strip().lower() == master["title"].strip().lower():
|
||||
# Identical title → direct link (no embedding needed)
|
||||
self.stats["linked"] += 1
|
||||
self.stats["skipped_title_identical"] += 1
|
||||
if not dry_run:
|
||||
await self._mark_duplicate(master, candidate, confidence=1.0)
|
||||
else:
|
||||
# Different title within same hint → still likely duplicate
|
||||
# Use embedding to verify
|
||||
await self._check_and_link_within_group(master, candidate, dry_run)
|
||||
|
||||
self._progress_count += 1
|
||||
self._log_progress(hint)
|
||||
|
||||
async def _check_and_link_within_group(
|
||||
self,
|
||||
master: dict,
|
||||
candidate: dict,
|
||||
dry_run: bool,
|
||||
):
|
||||
"""Check if candidate (same hint group) is duplicate of master via embedding."""
|
||||
parts = candidate["merge_group_hint"].split(":", 2)
|
||||
action = parts[0] if len(parts) > 0 else ""
|
||||
obj = parts[1] if len(parts) > 1 else ""
|
||||
|
||||
canonical = canonicalize_text(action, obj, candidate["title"])
|
||||
embedding = await get_embedding(canonical)
|
||||
|
||||
if not embedding:
|
||||
# Can't embed → link anyway (same hint = same action+object)
|
||||
self.stats["linked"] += 1
|
||||
if not dry_run:
|
||||
await self._mark_duplicate(master, candidate, confidence=0.90)
|
||||
return
|
||||
|
||||
# Search the dedup collection (unfiltered — pattern_id is NULL)
|
||||
results = await qdrant_search_cross_regulation(
|
||||
embedding, top_k=3, collection=self.collection,
|
||||
)
|
||||
|
||||
if not results:
|
||||
# No Qdrant matches yet (master might not be indexed yet) → link to master
|
||||
self.stats["linked"] += 1
|
||||
if not dry_run:
|
||||
await self._mark_duplicate(master, candidate, confidence=0.90)
|
||||
return
|
||||
|
||||
best = results[0]
|
||||
best_score = best.get("score", 0.0)
|
||||
best_payload = best.get("payload", {})
|
||||
best_uuid = best_payload.get("control_uuid", "")
|
||||
|
||||
if best_score > LINK_THRESHOLD:
|
||||
self.stats["linked"] += 1
|
||||
if not dry_run:
|
||||
await self._mark_duplicate_to(best_uuid, candidate, confidence=best_score)
|
||||
elif best_score > REVIEW_THRESHOLD:
|
||||
self.stats["review"] += 1
|
||||
if not dry_run:
|
||||
self._write_review(candidate, best_payload, best_score)
|
||||
else:
|
||||
# Very different despite same hint → new master
|
||||
self.stats["new_controls"] += 1
|
||||
if not dry_run:
|
||||
await self._index_with_embedding(candidate, embedding)
|
||||
|
||||
async def _run_cross_group_pass(self):
|
||||
"""Phase 2: Find cross-group duplicates among surviving masters.
|
||||
|
||||
After Phase 1, ~52k masters remain. Many have similar semantics
|
||||
despite different merge_group_hints (e.g. different German spellings).
|
||||
This pass embeds all masters and finds near-duplicates via Qdrant.
|
||||
"""
|
||||
logger.info("BatchDedup Phase 2: Cross-group pass starting...")
|
||||
|
||||
rows = self.db.execute(text("""
|
||||
SELECT id::text, control_id, title,
|
||||
generation_metadata->>'merge_group_hint' as merge_group_hint
|
||||
FROM canonical_controls
|
||||
WHERE decomposition_method = 'pass0b'
|
||||
AND release_state != 'duplicate'
|
||||
AND release_state != 'deprecated'
|
||||
ORDER BY control_id
|
||||
""")).fetchall()
|
||||
|
||||
self._progress_total = len(rows)
|
||||
self._progress_count = 0
|
||||
logger.info("BatchDedup Cross-group: %d masters to check", len(rows))
|
||||
cross_linked = 0
|
||||
cross_review = 0
|
||||
|
||||
# Process in parallel batches for embedding + Qdrant search
|
||||
PARALLEL_BATCH = 10
|
||||
|
||||
async def _embed_and_search(r):
|
||||
"""Embed one control and search Qdrant — safe for asyncio.gather."""
|
||||
hint = r[3] or ""
|
||||
parts = hint.split(":", 2)
|
||||
action = parts[0] if len(parts) > 0 else ""
|
||||
obj = parts[1] if len(parts) > 1 else ""
|
||||
canonical = canonicalize_text(action, obj, r[2])
|
||||
embedding = await get_embedding(canonical)
|
||||
if not embedding:
|
||||
return None
|
||||
results = await qdrant_search_cross_regulation(
|
||||
embedding, top_k=5, collection=self.collection,
|
||||
)
|
||||
return (r, results)
|
||||
|
||||
for batch_start in range(0, len(rows), PARALLEL_BATCH):
|
||||
batch = rows[batch_start:batch_start + PARALLEL_BATCH]
|
||||
tasks = [_embed_and_search(r) for r in batch]
|
||||
results_batch = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
for res in results_batch:
|
||||
if res is None or isinstance(res, Exception):
|
||||
if isinstance(res, Exception):
|
||||
logger.error("BatchDedup embed/search error: %s", res)
|
||||
self.stats["errors"] += 1
|
||||
continue
|
||||
|
||||
r, results = res
|
||||
ctrl_uuid = r[0]
|
||||
hint = r[3] or ""
|
||||
|
||||
if not results:
|
||||
continue
|
||||
|
||||
for match in results:
|
||||
match_score = match.get("score", 0.0)
|
||||
match_payload = match.get("payload", {})
|
||||
match_uuid = match_payload.get("control_uuid", "")
|
||||
|
||||
if match_uuid == ctrl_uuid:
|
||||
continue
|
||||
|
||||
if match_score > LINK_THRESHOLD:
|
||||
try:
|
||||
self.db.execute(text("""
|
||||
UPDATE canonical_controls
|
||||
SET release_state = 'duplicate', merged_into_uuid = CAST(:master AS uuid)
|
||||
WHERE id = CAST(:dup AS uuid)
|
||||
AND release_state != 'duplicate'
|
||||
"""), {"master": match_uuid, "dup": ctrl_uuid})
|
||||
|
||||
self.db.execute(text("""
|
||||
INSERT INTO control_parent_links
|
||||
(control_uuid, parent_control_uuid, link_type, confidence)
|
||||
VALUES (CAST(:cu AS uuid), CAST(:pu AS uuid), 'cross_regulation', :conf)
|
||||
ON CONFLICT (control_uuid, parent_control_uuid) DO NOTHING
|
||||
"""), {"cu": match_uuid, "pu": ctrl_uuid, "conf": match_score})
|
||||
|
||||
transferred = self._transfer_parent_links(match_uuid, ctrl_uuid)
|
||||
self.stats["parent_links_transferred"] += transferred
|
||||
|
||||
self.db.commit()
|
||||
cross_linked += 1
|
||||
except Exception as e:
|
||||
logger.error("BatchDedup cross-group link error %s→%s: %s",
|
||||
ctrl_uuid, match_uuid, e)
|
||||
self.db.rollback()
|
||||
self.stats["errors"] += 1
|
||||
break
|
||||
elif match_score > REVIEW_THRESHOLD:
|
||||
self._write_review(
|
||||
{"control_id": r[1], "title": r[2], "objective": "",
|
||||
"merge_group_hint": hint, "pattern_id": None},
|
||||
match_payload, match_score,
|
||||
)
|
||||
cross_review += 1
|
||||
break
|
||||
|
||||
processed = min(batch_start + PARALLEL_BATCH, len(rows))
|
||||
self._progress_count = processed
|
||||
if processed % 500 < PARALLEL_BATCH:
|
||||
logger.info("BatchDedup Cross-group: %d/%d checked, %d linked, %d review",
|
||||
processed, len(rows), cross_linked, cross_review)
|
||||
|
||||
self.stats["cross_group_linked"] = cross_linked
|
||||
self.stats["cross_group_review"] = cross_review
|
||||
logger.info("BatchDedup Cross-group complete: %d linked, %d review",
|
||||
cross_linked, cross_review)
|
||||
|

    # ── Qdrant Helpers ───────────────────────────────────────────────────

    async def _embed_and_index(self, control: dict):
        """Compute embedding and index a control in the dedup Qdrant collection."""
        parts = control["merge_group_hint"].split(":", 2)
        action = parts[0] if len(parts) > 0 else ""
        obj = parts[1] if len(parts) > 1 else ""

        canonical = canonicalize_text(action, obj, control["title"])
        embedding = await get_embedding(canonical)
        if not embedding:
            return

        # Delegate to the shared indexing path so both entry points build
        # an identical Qdrant payload.
        await self._index_with_embedding(control, embedding)

    async def _index_with_embedding(self, control: dict, embedding: list):
        """Index a control with a pre-computed embedding."""
        parts = control["merge_group_hint"].split(":", 2)
        action = parts[0] if len(parts) > 0 else ""
        obj = parts[1] if len(parts) > 1 else ""

        norm_action = normalize_action(action)
        norm_object = normalize_object(obj)
        canonical = canonicalize_text(action, obj, control["title"])

        await qdrant_upsert(
            point_id=control["uuid"],
            embedding=embedding,
            payload={
                "control_uuid": control["uuid"],
                "control_id": control["control_id"],
                "title": control["title"],
                "pattern_id": control.get("pattern_id"),
                "action_normalized": norm_action,
                "object_normalized": norm_object,
                "canonical_text": canonical,
                "merge_group_hint": control["merge_group_hint"],
            },
            collection=self.collection,
        )

    # ── DB Write Helpers ─────────────────────────────────────────────────

    async def _mark_duplicate(self, master: dict, candidate: dict, confidence: float):
        """Mark candidate as duplicate of master, transfer parent links."""
        try:
            self.db.execute(text("""
                UPDATE canonical_controls
                SET release_state = 'duplicate', merged_into_uuid = CAST(:master AS uuid)
                WHERE id = CAST(:cand AS uuid)
            """), {"master": master["uuid"], "cand": candidate["uuid"]})

            self.db.execute(text("""
                INSERT INTO control_parent_links
                    (control_uuid, parent_control_uuid, link_type, confidence)
                VALUES (CAST(:master AS uuid), CAST(:cand_parent AS uuid), 'dedup_merge', :conf)
                ON CONFLICT (control_uuid, parent_control_uuid) DO NOTHING
            """), {"master": master["uuid"], "cand_parent": candidate["uuid"], "conf": confidence})

            transferred = self._transfer_parent_links(master["uuid"], candidate["uuid"])
            self.stats["parent_links_transferred"] += transferred

            self.db.commit()
        except Exception as e:
            logger.error("BatchDedup _mark_duplicate error %s→%s: %s",
                         candidate["uuid"], master["uuid"], e)
            self.db.rollback()
            raise

    async def _mark_duplicate_to(self, master_uuid: str, candidate: dict, confidence: float):
        """Mark candidate as duplicate of a Qdrant-matched master."""
        try:
            self.db.execute(text("""
                UPDATE canonical_controls
                SET release_state = 'duplicate', merged_into_uuid = CAST(:master AS uuid)
                WHERE id = CAST(:cand AS uuid)
            """), {"master": master_uuid, "cand": candidate["uuid"]})

            self.db.execute(text("""
                INSERT INTO control_parent_links
                    (control_uuid, parent_control_uuid, link_type, confidence)
                VALUES (CAST(:master AS uuid), CAST(:cand_parent AS uuid), 'dedup_merge', :conf)
                ON CONFLICT (control_uuid, parent_control_uuid) DO NOTHING
            """), {"master": master_uuid, "cand_parent": candidate["uuid"], "conf": confidence})

            transferred = self._transfer_parent_links(master_uuid, candidate["uuid"])
            self.stats["parent_links_transferred"] += transferred

            self.db.commit()
        except Exception as e:
            logger.error("BatchDedup _mark_duplicate_to error %s→%s: %s",
                         candidate["uuid"], master_uuid, e)
            self.db.rollback()
            raise

    def _transfer_parent_links(self, master_uuid: str, duplicate_uuid: str) -> int:
        """Move existing parent links from duplicate to master."""
        rows = self.db.execute(text("""
            SELECT parent_control_uuid::text, link_type, confidence,
                   source_regulation, source_article, obligation_candidate_id::text
            FROM control_parent_links
            WHERE control_uuid = CAST(:dup AS uuid)
              AND link_type = 'decomposition'
        """), {"dup": duplicate_uuid}).fetchall()

        transferred = 0
        for r in rows:
            parent_uuid = r[0]
            if parent_uuid == master_uuid:
                continue
            self.db.execute(text("""
                INSERT INTO control_parent_links
                    (control_uuid, parent_control_uuid, link_type, confidence,
                     source_regulation, source_article, obligation_candidate_id)
                VALUES (CAST(:cu AS uuid), CAST(:pu AS uuid), :lt, :conf,
                        :sr, :sa, CAST(:oci AS uuid))
                ON CONFLICT (control_uuid, parent_control_uuid) DO NOTHING
            """), {
                "cu": master_uuid,
                "pu": parent_uuid,
                "lt": r[1],
                # Preserve a stored 0.0 confidence; default to 1.0 only when NULL.
                "conf": float(r[2]) if r[2] is not None else 1.0,
                "sr": r[3],
                "sa": r[4],
                "oci": r[5],
            })
            transferred += 1

        return transferred

    def _write_review(self, candidate: dict, matched_payload: dict, score: float):
        """Write a dedup review entry for borderline matches."""
        try:
            self.db.execute(text("""
                INSERT INTO control_dedup_reviews
                    (candidate_control_id, candidate_title, candidate_objective,
                     matched_control_uuid, matched_control_id,
                     similarity_score, dedup_stage, dedup_details)
                VALUES (:ccid, :ct, :co, CAST(:mcu AS uuid), :mci,
                        :ss, 'batch_dedup', CAST(:dd AS jsonb))
            """), {
                "ccid": candidate["control_id"],
                "ct": candidate["title"],
                "co": candidate.get("objective", ""),
                "mcu": matched_payload.get("control_uuid"),
                "mci": matched_payload.get("control_id"),
                "ss": score,
                "dd": json.dumps({
                    "merge_group_hint": candidate.get("merge_group_hint", ""),
                    "pattern_id": candidate.get("pattern_id"),
                }),
            })
            self.db.commit()
        except Exception as e:
            logger.error("BatchDedup _write_review error: %s", e)
            self.db.rollback()
            raise

    # ── Progress ─────────────────────────────────────────────────────────

    def _log_progress(self, hint: str):
        """Log progress every 500 controls."""
        if self._progress_count > 0 and self._progress_count % 500 == 0:
            logger.info(
                "BatchDedup [%s] %d/%d — masters=%d, linked=%d, review=%d",
                self._progress_phase, self._progress_count, self._progress_total,
                self.stats["masters"], self.stats["linked"], self.stats["review"],
            )

    def get_status(self) -> dict:
        """Return current progress stats (for status endpoint)."""
        return {
            "phase": self._progress_phase,
            "progress": self._progress_count,
            "total": self._progress_total,
            **self.stats,
        }
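
    # Hedged example of the status payload (the fixed keys come from the
    # method above; the stats keys are the counters this class maintains,
    # e.g. "masters", "linked", "review", "errors" — exact values are
    # illustrative, not recorded output):
    #
    #     dedup.get_status()
    #     → {"phase": "cross_group", "progress": 1500, "total": 8200,
    #        "masters": 910, "linked": 340, "review": 57, "errors": 2, ...}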

control-pipeline/services/citation_backfill.py (new file, 438 lines)
@@ -0,0 +1,438 @@
"""
Citation Backfill Service — enrich existing controls with article/paragraph provenance.

3-tier matching strategy:
  Tier 1 — Hash match: sha256(source_original_text) → RAG chunk lookup
  Tier 2 — Regex parse: split concatenated "DSGVO Art. 35" → regulation + article
  Tier 3 — Ollama LLM: ask local LLM to identify article/paragraph from text
"""

import hashlib
import json
import logging
import os
import re
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Optional

import httpx
from sqlalchemy import text
from sqlalchemy.orm import Session

from .rag_client import ComplianceRAGClient, RAGSearchResult

logger = logging.getLogger(__name__)

OLLAMA_URL = os.getenv("OLLAMA_URL", "http://host.docker.internal:11434")
OLLAMA_MODEL = os.getenv("CONTROL_GEN_OLLAMA_MODEL", "qwen3.5:35b-a3b")
LLM_TIMEOUT = float(os.getenv("CONTROL_GEN_LLM_TIMEOUT", "180"))

ALL_COLLECTIONS = [
    "bp_compliance_ce",
    "bp_compliance_gesetze",
    "bp_compliance_datenschutz",
    "bp_dsfa_corpus",
    "bp_legal_templates",
]

BACKFILL_SYSTEM_PROMPT = (
    "Du bist ein Rechtsexperte. Deine Aufgabe ist es, aus einem Gesetzestext "
    "den genauen Artikel und Absatz zu bestimmen. Antworte NUR mit validem JSON."
)

# Regex to split a concatenated source like "DSGVO Art. 35" or "NIS2 Artikel 21 Abs. 2"
_SOURCE_ARTICLE_RE = re.compile(
    r"^(.+?)\s+(Art(?:ikel)?\.?\s*\d+.*)$", re.IGNORECASE
)


@dataclass
class MatchResult:
    article: str
    paragraph: str
    method: str  # "hash", "regex", "llm"


@dataclass
class BackfillResult:
    total_controls: int = 0
    matched_hash: int = 0
    matched_regex: int = 0
    matched_llm: int = 0
    unmatched: int = 0
    updated: int = 0
    errors: list = field(default_factory=list)


class CitationBackfill:
    """Backfill article/paragraph into existing control source_citations."""

    def __init__(self, db: Session, rag_client: ComplianceRAGClient):
        self.db = db
        self.rag = rag_client
        self._rag_index: dict[str, RAGSearchResult] = {}

    async def run(self, dry_run: bool = True, limit: int = 0) -> BackfillResult:
        """Main entry: iterate controls missing article/paragraph, match to RAG, update."""
        result = BackfillResult()

        # Load controls needing backfill
        controls = self._load_controls_needing_backfill(limit)
        result.total_controls = len(controls)
        logger.info("Backfill: %d controls need article/paragraph enrichment", len(controls))

        if not controls:
            return result

        # Collect hashes we need to find — only build the index for controls with source text
        needed_hashes: set[str] = set()
        for ctrl in controls:
            src = ctrl.get("source_original_text")
            if src:
                needed_hashes.add(hashlib.sha256(src.encode()).hexdigest())

        if needed_hashes:
            # Build a targeted RAG index — only scroll collections that our controls reference
            logger.info("Building targeted RAG hash index for %d source texts...", len(needed_hashes))
            await self._build_rag_index_targeted(controls)
            logger.info("RAG index built: %d chunks indexed, %d hashes needed",
                        len(self._rag_index), len(needed_hashes))
        else:
            logger.info("No source_original_text found — skipping RAG index build")

        # Process each control
        for i, ctrl in enumerate(controls):
            if i > 0 and i % 100 == 0:
                logger.info("Backfill progress: %d/%d processed", i, result.total_controls)

            try:
                match = await self._match_control(ctrl)
                if match:
                    if match.method == "hash":
                        result.matched_hash += 1
                    elif match.method == "regex":
                        result.matched_regex += 1
                    elif match.method == "llm":
                        result.matched_llm += 1

                    if not dry_run:
                        self._update_control(ctrl, match)
                        result.updated += 1
                    else:
                        logger.debug(
                            "DRY RUN: Would update %s with article=%s paragraph=%s (method=%s)",
                            ctrl["control_id"], match.article, match.paragraph, match.method,
                        )
                else:
                    result.unmatched += 1

            except Exception as e:
                error_msg = f"Error backfilling {ctrl.get('control_id', '?')}: {e}"
                logger.error(error_msg)
                result.errors.append(error_msg)

        if not dry_run:
            try:
                self.db.commit()
            except Exception as e:
                logger.error("Backfill commit failed: %s", e)
                result.errors.append(f"Commit failed: {e}")

        logger.info(
            "Backfill complete: %d total, hash=%d regex=%d llm=%d unmatched=%d updated=%d",
            result.total_controls, result.matched_hash, result.matched_regex,
            result.matched_llm, result.unmatched, result.updated,
        )
        return result

    def _load_controls_needing_backfill(self, limit: int = 0) -> list[dict]:
        """Load controls where source_citation exists but lacks a separate 'article' key."""
        query = """
            SELECT id, control_id, source_citation, source_original_text,
                   generation_metadata, license_rule
            FROM canonical_controls
            WHERE license_rule IN (1, 2)
              AND source_citation IS NOT NULL
              AND (
                  source_citation->>'article' IS NULL
                  OR source_citation->>'article' = ''
              )
            ORDER BY control_id
        """
        if limit > 0:
            query += f" LIMIT {limit}"

        result = self.db.execute(text(query))
        cols = result.keys()
        controls = []
        for row in result:
            ctrl = dict(zip(cols, row))
            ctrl["id"] = str(ctrl["id"])
            # Parse JSON fields
            for jf in ("source_citation", "generation_metadata"):
                if isinstance(ctrl.get(jf), str):
                    try:
                        ctrl[jf] = json.loads(ctrl[jf])
                    except (json.JSONDecodeError, TypeError):
                        ctrl[jf] = {}
            controls.append(ctrl)
        return controls

    async def _build_rag_index_targeted(self, controls: list[dict]):
        """Build the RAG index by scrolling only collections relevant to our controls.

        Uses regulation codes from generation_metadata to identify which collections
        to search, falling back to all collections only if needed.
        """
        # Determine which collections are relevant based on regulation codes
        regulation_to_collection = self._map_regulations_to_collections(controls)
        collections_to_search = set(regulation_to_collection.values()) or set(ALL_COLLECTIONS)

        logger.info("Targeted index: searching %d collections: %s",
                    len(collections_to_search), ", ".join(collections_to_search))

        for collection in collections_to_search:
            offset = None
            page = 0
            seen_offsets: set[str] = set()
            while True:
                chunks, next_offset = await self.rag.scroll(
                    collection=collection, offset=offset, limit=200,
                )
                if not chunks:
                    break
                for chunk in chunks:
                    if chunk.text and len(chunk.text.strip()) >= 50:
                        h = hashlib.sha256(chunk.text.encode()).hexdigest()
                        self._rag_index[h] = chunk
                page += 1
                if page % 50 == 0:
                    logger.info("Indexing %s: page %d (%d chunks so far)",
                                collection, page, len(self._rag_index))
                if not next_offset:
                    break
                if next_offset in seen_offsets:
                    logger.warning("Scroll loop in %s at page %d — stopping", collection, page)
                    break
                seen_offsets.add(next_offset)
                offset = next_offset

            logger.info("Indexed collection %s: %d pages", collection, page)

    def _map_regulations_to_collections(self, controls: list[dict]) -> dict[str, str]:
        """Map regulation codes from controls to likely Qdrant collections."""
        # Heuristic: regulation code prefix → collection
        collection_map = {
            "eu_": "bp_compliance_gesetze",
            "dsgvo": "bp_compliance_datenschutz",
            "bdsg": "bp_compliance_gesetze",
            "ttdsg": "bp_compliance_gesetze",
            "nist_": "bp_compliance_ce",
            "owasp": "bp_compliance_ce",
            "bsi_": "bp_compliance_ce",
            "enisa": "bp_compliance_ce",
            "at_": "bp_compliance_recht",
            "fr_": "bp_compliance_recht",
            "es_": "bp_compliance_recht",
        }
        result: dict[str, str] = {}
        for ctrl in controls:
            meta = ctrl.get("generation_metadata") or {}
            reg = meta.get("source_regulation", "")
            if not reg:
                continue
            for prefix, coll in collection_map.items():
                if reg.startswith(prefix):
                    result[reg] = coll
                    break
            else:
                # Unknown regulation — search all
                for coll in ALL_COLLECTIONS:
                    result[f"_all_{coll}"] = coll
        return result

    async def _match_control(self, ctrl: dict) -> Optional[MatchResult]:
        """3-tier matching: hash → regex → LLM."""

        # Tier 1: Hash match against the RAG index
        source_text = ctrl.get("source_original_text")
        if source_text:
            h = hashlib.sha256(source_text.encode()).hexdigest()
            chunk = self._rag_index.get(h)
            if chunk and (chunk.article or chunk.paragraph):
                return MatchResult(
                    article=chunk.article or "",
                    paragraph=chunk.paragraph or "",
                    method="hash",
                )

        # Tier 2: Regex-parse the concatenated source
        citation = ctrl.get("source_citation") or {}
        source_str = citation.get("source", "")
        parsed = _parse_concatenated_source(source_str)
        if parsed and parsed["article"]:
            return MatchResult(
                article=parsed["article"],
                paragraph="",  # regex can't extract a paragraph from the concatenated format
                method="regex",
            )

        # Tier 3: Ollama LLM
        if source_text:
            return await self._llm_match(ctrl)

        return None

    async def _llm_match(self, ctrl: dict) -> Optional[MatchResult]:
        """Use Ollama to identify article/paragraph from the source text."""
        citation = ctrl.get("source_citation") or {}
        regulation_name = citation.get("source", "")
        metadata = ctrl.get("generation_metadata") or {}
        regulation_code = metadata.get("source_regulation", "")
        source_text = ctrl.get("source_original_text", "")

        prompt = f"""Analysiere den folgenden Gesetzestext und bestimme den genauen Artikel und Absatz.

Gesetz: {regulation_name} (Code: {regulation_code})

Text:
---
{source_text[:2000]}
---

Antworte NUR mit JSON:
{{"article": "Art. XX", "paragraph": "Abs. Y"}}

Falls kein spezifischer Absatz erkennbar ist, setze paragraph auf "".
Falls kein Artikel erkennbar ist, setze article auf "".
Bei deutschen Gesetzen mit § verwende: "§ XX" statt "Art. XX"."""

        try:
            raw = await _llm_ollama(prompt, BACKFILL_SYSTEM_PROMPT)
            data = _parse_json(raw)
            if data and (data.get("article") or data.get("paragraph")):
                return MatchResult(
                    article=data.get("article", ""),
                    paragraph=data.get("paragraph", ""),
                    method="llm",
                )
        except Exception as e:
            logger.warning("LLM match failed for %s: %s", ctrl.get("control_id"), e)

        return None

    def _update_control(self, ctrl: dict, match: MatchResult):
        """Update source_citation and generation_metadata in the DB."""
        citation = ctrl.get("source_citation") or {}

        # Clean the source name: remove the concatenated article if present
        source_str = citation.get("source", "")
        parsed = _parse_concatenated_source(source_str)
        if parsed:
            citation["source"] = parsed["name"]

        # Add separate article/paragraph fields
        citation["article"] = match.article
        citation["paragraph"] = match.paragraph

        # Update generation_metadata; method and timestamp are recorded
        # unconditionally so every backfill attempt stays auditable
        metadata = ctrl.get("generation_metadata") or {}
        if match.article:
            metadata["source_article"] = match.article
            metadata["source_paragraph"] = match.paragraph
        metadata["backfill_method"] = match.method
        metadata["backfill_at"] = datetime.now(timezone.utc).isoformat()

        self.db.execute(
            text("""
                UPDATE canonical_controls
                SET source_citation = :citation,
                    generation_metadata = :metadata,
                    updated_at = NOW()
                WHERE id = CAST(:id AS uuid)
            """),
            {
                "id": ctrl["id"],
                "citation": json.dumps(citation),
                "metadata": json.dumps(metadata),
            },
        )


def _parse_concatenated_source(source: str) -> Optional[dict]:
    """Parse 'DSGVO Art. 35' → {name: 'DSGVO', article: 'Art. 35'}.

    Also handles the '§' format: 'BDSG § 42' → {name: 'BDSG', article: '§ 42'}.
    """
    if not source:
        return None

    # Try the Art./Artikel pattern
    m = _SOURCE_ARTICLE_RE.match(source)
    if m:
        return {"name": m.group(1).strip(), "article": m.group(2).strip()}

    # Try the § pattern
    m2 = re.match(r"^(.+?)\s+(§\s*\d+.*)$", source)
    if m2:
        return {"name": m2.group(1).strip(), "article": m2.group(2).strip()}

    return None
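
# Illustrative behaviour of the parser above (hedged sketch — values derived
# from the two regexes, not from recorded output):
#
#     >>> _parse_concatenated_source("DSGVO Art. 35")
#     {'name': 'DSGVO', 'article': 'Art. 35'}
#     >>> _parse_concatenated_source("BDSG § 42")
#     {'name': 'BDSG', 'article': '§ 42'}
#     >>> _parse_concatenated_source("NIS2") is None
#     True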


async def _llm_ollama(prompt: str, system_prompt: Optional[str] = None) -> str:
    """Call the Ollama chat API for backfill matching."""
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})

    payload = {
        "model": OLLAMA_MODEL,
        "messages": messages,
        "stream": False,
        "format": "json",
        "options": {"num_predict": 256},
        "think": False,
    }

    try:
        async with httpx.AsyncClient(timeout=LLM_TIMEOUT) as client:
            resp = await client.post(f"{OLLAMA_URL}/api/chat", json=payload)
            if resp.status_code != 200:
                logger.error("Ollama backfill failed %d: %s", resp.status_code, resp.text[:300])
                return ""
            data = resp.json()
            msg = data.get("message", {})
            if isinstance(msg, dict):
                return msg.get("content", "")
            return data.get("response", str(msg))
    except Exception as e:
        logger.error("Ollama backfill request failed: %s", e)
        return ""


def _parse_json(raw: str) -> Optional[dict]:
    """Extract a JSON object from LLM output."""
    if not raw:
        return None
    # Try a direct parse
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        pass
    # Try extracting from a markdown code block
    m = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", raw, re.DOTALL)
    if m:
        try:
            return json.loads(m.group(1))
        except json.JSONDecodeError:
            pass
    # Try finding the first { ... }
    m = re.search(r"\{[^{}]*\}", raw)
    if m:
        try:
            return json.loads(m.group(0))
        except json.JSONDecodeError:
            pass
    return None
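
# Hedged sketch of the three extraction attempts above (derived from the
# regexes, not from recorded output):
#
#     >>> _parse_json('{"article": "Art. 5"}')
#     {'article': 'Art. 5'}
#     >>> _parse_json('```json\n{"article": "Art. 5"}\n```')
#     {'article': 'Art. 5'}
#     >>> _parse_json('Ergebnis: {"article": "Art. 5"} (siehe oben)')
#     {'article': 'Art. 5'}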

control-pipeline/services/control_composer.py (new file, 546 lines)
@@ -0,0 +1,546 @@
"""Control Composer — Pattern + Obligation → Master Control.

Takes an obligation (from ObligationExtractor) and a matched control pattern
(from PatternMatcher), then uses an LLM to compose a structured, actionable
Master Control. Replaces the old Stage 3 (STRUCTURE/REFORM) with a
pattern-guided approach.

Three composition modes based on license rules:
  Rule 1: Obligation + Pattern + original text → full control
  Rule 2: Obligation + Pattern + original text + citation → control
  Rule 3: Obligation + Pattern (NO original text) → reformulated control

Fallback: No pattern match → basic generation (tagged needs_pattern_assignment)

Part of the Multi-Layer Control Architecture (Phase 6 of 8).
"""

import json
import logging
import os
from dataclasses import dataclass, field
from typing import Optional

from services.obligation_extractor import (
    ObligationMatch,
    _llm_ollama,
    _parse_json,
)
from services.pattern_matcher import (
    ControlPattern,
    PatternMatchResult,
)

logger = logging.getLogger(__name__)

OLLAMA_MODEL = os.getenv("CONTROL_GEN_OLLAMA_MODEL", "qwen3.5:35b-a3b")

# Valid values for generated control fields
VALID_SEVERITIES = {"low", "medium", "high", "critical"}
VALID_EFFORTS = {"s", "m", "l", "xl"}
VALID_VERIFICATION = {"code_review", "document", "tool", "hybrid"}


@dataclass
class ComposedControl:
    """A Master Control composed from an obligation + pattern."""

    # Core fields (match the canonical_controls schema)
    control_id: str = ""
    title: str = ""
    objective: str = ""
    rationale: str = ""
    scope: dict = field(default_factory=dict)
    requirements: list = field(default_factory=list)
    test_procedure: list = field(default_factory=list)
    evidence: list = field(default_factory=list)
    severity: str = "medium"
    risk_score: float = 5.0
    implementation_effort: str = "m"
    open_anchors: list = field(default_factory=list)
    release_state: str = "draft"
    tags: list = field(default_factory=list)
    # 3-Rule License fields
    license_rule: Optional[int] = None
    source_original_text: Optional[str] = None
    source_citation: Optional[dict] = None
    customer_visible: bool = True
    # Classification
    verification_method: Optional[str] = None
    category: Optional[str] = None
    target_audience: Optional[list] = None
    # Pattern + Obligation linkage
    pattern_id: Optional[str] = None
    obligation_ids: list = field(default_factory=list)
    # Metadata
    generation_metadata: dict = field(default_factory=dict)
    composition_method: str = "pattern_guided"  # pattern_guided | fallback

    def to_dict(self) -> dict:
        """Serialize for DB storage or API response."""
        return {
            "control_id": self.control_id,
            "title": self.title,
            "objective": self.objective,
            "rationale": self.rationale,
            "scope": self.scope,
            "requirements": self.requirements,
            "test_procedure": self.test_procedure,
            "evidence": self.evidence,
            "severity": self.severity,
            "risk_score": self.risk_score,
            "implementation_effort": self.implementation_effort,
            "open_anchors": self.open_anchors,
            "release_state": self.release_state,
            "tags": self.tags,
            "license_rule": self.license_rule,
            "source_original_text": self.source_original_text,
            "source_citation": self.source_citation,
            "customer_visible": self.customer_visible,
            "verification_method": self.verification_method,
            "category": self.category,
            "target_audience": self.target_audience,
            "pattern_id": self.pattern_id,
            "obligation_ids": self.obligation_ids,
            "generation_metadata": self.generation_metadata,
            "composition_method": self.composition_method,
        }


class ControlComposer:
    """Composes Master Controls from obligations + patterns.

    Usage::

        composer = ControlComposer()

        control = await composer.compose(
            obligation=obligation_match,
            pattern_result=pattern_match_result,
            chunk_text="...",
            license_rule=1,
            source_citation={...},
        )
    """

    async def compose(
        self,
        obligation: ObligationMatch,
        pattern_result: PatternMatchResult,
        chunk_text: Optional[str] = None,
        license_rule: int = 3,
        source_citation: Optional[dict] = None,
        regulation_code: Optional[str] = None,
    ) -> ComposedControl:
        """Compose a Master Control from obligation + pattern.

        Args:
            obligation: The extracted obligation (from ObligationExtractor).
            pattern_result: The matched pattern (from PatternMatcher).
            chunk_text: Original RAG chunk text (only used for Rules 1-2).
            license_rule: 1=free, 2=citation, 3=restricted.
            source_citation: Citation metadata for Rule 2.
            regulation_code: Source regulation code.

        Returns:
            ComposedControl ready for storage.
        """
        pattern = pattern_result.pattern if pattern_result else None

        if pattern:
            control = await self._compose_with_pattern(
                obligation, pattern, chunk_text, license_rule, source_citation,
            )
        else:
            control = await self._compose_fallback(
                obligation, chunk_text, license_rule, source_citation,
            )

        # Set linkage fields
        control.pattern_id = pattern.id if pattern else None
        if obligation.obligation_id:
            control.obligation_ids = [obligation.obligation_id]

        # Set license fields
        control.license_rule = license_rule
        if license_rule in (1, 2) and chunk_text:
            control.source_original_text = chunk_text
        if license_rule == 2 and source_citation:
            control.source_citation = source_citation
        if license_rule == 3:
            control.customer_visible = False
            control.source_original_text = None
            control.source_citation = None

        # Build metadata
        control.generation_metadata = {
            "composition_method": control.composition_method,
            "pattern_id": control.pattern_id,
            "pattern_confidence": round(pattern_result.confidence, 3) if pattern_result else 0,
            "pattern_method": pattern_result.method if pattern_result else "none",
            "obligation_id": obligation.obligation_id,
            "obligation_method": obligation.method,
            "obligation_confidence": round(obligation.confidence, 3),
            "license_rule": license_rule,
            "regulation_code": regulation_code,
        }

        # Validate and fix fields
        _validate_control(control)

        return control

    async def compose_batch(
        self,
        items: list[dict],
    ) -> list[ComposedControl]:
        """Compose multiple controls.

        Args:
            items: List of dicts with keys: obligation, pattern_result,
                chunk_text, license_rule, source_citation, regulation_code.
                An example item is sketched below this method.

        Returns:
            List of ComposedControl instances.
        """
        results = []
        for item in items:
            control = await self.compose(
                obligation=item["obligation"],
                pattern_result=item.get("pattern_result", PatternMatchResult()),
                chunk_text=item.get("chunk_text"),
                license_rule=item.get("license_rule", 3),
                source_citation=item.get("source_citation"),
                regulation_code=item.get("regulation_code"),
            )
            results.append(control)
        return results
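
    # Hedged sketch of one batch item (field names mirror compose()'s
    # signature; the obligation/pattern objects are assumed to come from
    # ObligationExtractor and PatternMatcher — the literal values are
    # illustrative):
    #
    #     item = {
    #         "obligation": obligation_match,          # ObligationMatch
    #         "pattern_result": pattern_match_result,  # PatternMatchResult
    #         "chunk_text": "Der Verantwortliche trifft ...",
    #         "license_rule": 2,
    #         "source_citation": {"source": "DSGVO", "article": "Art. 32"},
    #         "regulation_code": "dsgvo",
    #     }
    #     controls = await composer.compose_batch([item])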

    # -----------------------------------------------------------------------
    # Pattern-guided composition
    # -----------------------------------------------------------------------

    async def _compose_with_pattern(
        self,
        obligation: ObligationMatch,
        pattern: ControlPattern,
        chunk_text: Optional[str],
        license_rule: int,
        source_citation: Optional[dict],
    ) -> ComposedControl:
        """Use the LLM to fill the pattern template with obligation-specific details."""
        prompt = _build_compose_prompt(obligation, pattern, chunk_text, license_rule)
        system_prompt = _compose_system_prompt(license_rule)

        llm_result = await _llm_ollama(prompt, system_prompt)
        if not llm_result:
            return self._compose_from_template(obligation, pattern)

        parsed = _parse_json(llm_result)
        if not parsed:
            return self._compose_from_template(obligation, pattern)

        control = ComposedControl(
            title=parsed.get("title", pattern.name_de)[:255],
            objective=parsed.get("objective", pattern.objective_template),
            rationale=parsed.get("rationale", pattern.rationale_template),
            requirements=_ensure_list(parsed.get("requirements", pattern.requirements_template)),
            test_procedure=_ensure_list(parsed.get("test_procedure", pattern.test_procedure_template)),
            evidence=_ensure_list(parsed.get("evidence", pattern.evidence_template)),
            severity=parsed.get("severity", pattern.severity_default),
            implementation_effort=parsed.get("implementation_effort", pattern.implementation_effort_default),
            category=parsed.get("category", pattern.category),
            tags=_ensure_list(parsed.get("tags", pattern.tags)),
            target_audience=_ensure_list(parsed.get("target_audience", [])),
            verification_method=parsed.get("verification_method"),
            open_anchors=_anchors_from_pattern(pattern),
            composition_method="pattern_guided",
        )

        return control

    def _compose_from_template(
        self,
        obligation: ObligationMatch,
        pattern: ControlPattern,
    ) -> ComposedControl:
        """Fallback: fill the template directly without the LLM (when the LLM fails)."""
        obl_title = obligation.obligation_title or ""
        obl_text = obligation.obligation_text or ""

        title = pattern.name_de
        if obl_title:
            title = f"{pattern.name_de} — {obl_title}"

        objective = pattern.objective_template
        if obl_text and len(obl_text) > 20:
            objective = f"{pattern.objective_template} Bezug: {obl_text[:200]}"

        return ComposedControl(
            title=title[:255],
            objective=objective,
            rationale=pattern.rationale_template,
            requirements=list(pattern.requirements_template),
            test_procedure=list(pattern.test_procedure_template),
            evidence=list(pattern.evidence_template),
            severity=pattern.severity_default,
            implementation_effort=pattern.implementation_effort_default,
            category=pattern.category,
            tags=list(pattern.tags),
            open_anchors=_anchors_from_pattern(pattern),
            composition_method="template_only",
        )

    # -----------------------------------------------------------------------
    # Fallback (no pattern)
    # -----------------------------------------------------------------------

    async def _compose_fallback(
        self,
        obligation: ObligationMatch,
        chunk_text: Optional[str],
        license_rule: int,
        source_citation: Optional[dict],
    ) -> ComposedControl:
        """Generate a control without a pattern template (old-style)."""
        prompt = _build_fallback_prompt(obligation, chunk_text, license_rule)
        system_prompt = _compose_system_prompt(license_rule)

        llm_result = await _llm_ollama(prompt, system_prompt)
        parsed = _parse_json(llm_result) if llm_result else {}

        obl_text = obligation.obligation_text or ""

        control = ComposedControl(
            title=parsed.get("title", obl_text[:100] if obl_text else "Untitled Control")[:255],
            objective=parsed.get("objective", obl_text[:500]),
            rationale=parsed.get("rationale", "Aus gesetzlicher Pflicht abgeleitet."),
            requirements=_ensure_list(parsed.get("requirements", [])),
            test_procedure=_ensure_list(parsed.get("test_procedure", [])),
            evidence=_ensure_list(parsed.get("evidence", [])),
            severity=parsed.get("severity", "medium"),
            implementation_effort=parsed.get("implementation_effort", "m"),
            category=parsed.get("category"),
            tags=_ensure_list(parsed.get("tags", [])),
            target_audience=_ensure_list(parsed.get("target_audience", [])),
            verification_method=parsed.get("verification_method"),
            composition_method="fallback",
            release_state="needs_review",
        )

        return control


# ---------------------------------------------------------------------------
# Prompt builders
# ---------------------------------------------------------------------------


def _compose_system_prompt(license_rule: int) -> str:
    """Build the system prompt based on the license rule."""
    if license_rule == 3:
        return (
            "Du bist ein Security-Compliance-Experte. Deine Aufgabe ist es, "
            "eigenstaendige Security Controls zu formulieren. "
            "Du formulierst IMMER in eigenen Worten. "
            "KOPIERE KEINE Saetze aus dem Quelltext. "
            "Verwende eigene Begriffe und Struktur. "
            "NENNE NICHT die Quelle. Keine proprietaeren Bezeichner. "
            "Antworte NUR mit validem JSON."
        )
    return (
        "Du bist ein Security-Compliance-Experte. "
        "Erstelle ein praxisorientiertes, umsetzbares Security Control. "
        "Antworte NUR mit validem JSON."
    )


def _build_compose_prompt(
    obligation: ObligationMatch,
    pattern: ControlPattern,
    chunk_text: Optional[str],
    license_rule: int,
) -> str:
    """Build the LLM prompt for pattern-guided composition."""
    obl_section = _obligation_section(obligation)
    pattern_section = _pattern_section(pattern)

    if license_rule == 3:
        context_section = "KONTEXT: Intern analysiert (keine Quellenangabe)."
    elif chunk_text:
        context_section = f"KONTEXT (Originaltext):\n{chunk_text[:2000]}"
    else:
        context_section = "KONTEXT: Kein Originaltext verfuegbar."

    return f"""Erstelle ein PRAXISORIENTIERTES Security Control.

{obl_section}

{pattern_section}

{context_section}

AUFGABE:
Fuelle das Muster mit pflicht-spezifischen Details.
Das Ergebnis muss UMSETZBAR sein — keine Gesetzesparaphrase.
Formuliere konkret und handlungsorientiert.

Antworte als JSON:
{{
  "title": "Kurzer praegnanter Titel (max 100 Zeichen, deutsch)",
  "objective": "Was soll erreicht werden? (1-3 Saetze)",
  "rationale": "Warum ist das wichtig? (1-2 Saetze)",
  "requirements": ["Konkrete Anforderung 1", "Anforderung 2", ...],
  "test_procedure": ["Pruefschritt 1", "Pruefschritt 2", ...],
  "evidence": ["Nachweis 1", "Nachweis 2", ...],
  "severity": "low|medium|high|critical",
  "implementation_effort": "s|m|l|xl",
  "category": "{pattern.category}",
  "tags": ["tag1", "tag2"],
  "target_audience": ["unternehmen", "behoerden", "entwickler"],
  "verification_method": "code_review|document|tool|hybrid"
}}"""


def _build_fallback_prompt(
    obligation: ObligationMatch,
    chunk_text: Optional[str],
    license_rule: int,
) -> str:
    """Build the LLM prompt for fallback composition (no pattern)."""
    obl_section = _obligation_section(obligation)

    if license_rule == 3:
        context_section = "KONTEXT: Intern analysiert (keine Quellenangabe)."
    elif chunk_text:
        context_section = f"KONTEXT (Originaltext):\n{chunk_text[:2000]}"
    else:
        context_section = "KONTEXT: Kein Originaltext verfuegbar."

    return f"""Erstelle ein Security Control aus der folgenden Pflicht.

{obl_section}

{context_section}

AUFGABE:
Formuliere ein umsetzbares Security Control.
Keine Gesetzesparaphrase — konkrete Massnahmen beschreiben.

Antworte als JSON:
{{
  "title": "Kurzer praegnanter Titel (max 100 Zeichen, deutsch)",
  "objective": "Was soll erreicht werden? (1-3 Saetze)",
  "rationale": "Warum ist das wichtig? (1-2 Saetze)",
  "requirements": ["Konkrete Anforderung 1", "Anforderung 2", ...],
  "test_procedure": ["Pruefschritt 1", "Pruefschritt 2", ...],
  "evidence": ["Nachweis 1", "Nachweis 2", ...],
  "severity": "low|medium|high|critical",
  "implementation_effort": "s|m|l|xl",
  "category": "one of: authentication, encryption, data_protection, etc.",
  "tags": ["tag1", "tag2"],
  "target_audience": ["unternehmen"],
  "verification_method": "code_review|document|tool|hybrid"
}}"""


def _obligation_section(obligation: ObligationMatch) -> str:
    """Format the obligation for the prompt."""
    parts = ["PFLICHT (was das Gesetz verlangt):"]
    if obligation.obligation_title:
        parts.append(f"  Titel: {obligation.obligation_title}")
    if obligation.obligation_text:
        parts.append(f"  Beschreibung: {obligation.obligation_text[:500]}")
    if obligation.obligation_id:
        parts.append(f"  ID: {obligation.obligation_id}")
    if obligation.regulation_id:
        parts.append(f"  Rechtsgrundlage: {obligation.regulation_id}")
    if not obligation.obligation_text and not obligation.obligation_title:
        parts.append("  (Keine spezifische Pflicht extrahiert)")
    return "\n".join(parts)


def _pattern_section(pattern: ControlPattern) -> str:
    """Format the pattern for the prompt."""
    reqs = "\n  ".join(f"- {r}" for r in pattern.requirements_template[:5])
    tests = "\n  ".join(f"- {t}" for t in pattern.test_procedure_template[:3])
    return f"""MUSTER (wie man es typischerweise umsetzt):
  Pattern: {pattern.name_de} ({pattern.id})
  Domain: {pattern.domain}
  Ziel-Template: {pattern.objective_template}
  Anforderungs-Template:
  {reqs}
  Pruefverfahren-Template:
  {tests}"""


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def _ensure_list(value) -> list:
    """Ensure a value is a list of strings."""
    if isinstance(value, list):
        return [str(v) for v in value if v]
    if isinstance(value, str):
        return [value]
    return []
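
# Hedged examples for the coercion above (derived from the three branches,
# not from recorded output — note that falsy list entries are dropped):
#
#     >>> _ensure_list(["a", "", "b"])
#     ['a', 'b']
#     >>> _ensure_list("nur ein Wert")
#     ['nur ein Wert']
#     >>> _ensure_list(None)
#     []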


def _anchors_from_pattern(pattern: ControlPattern) -> list:
    """Convert the pattern's open_anchor_refs to the control anchor format."""
    anchors = []
    for ref in pattern.open_anchor_refs:
        anchors.append({
            "framework": ref.get("framework", ""),
            "control_id": ref.get("ref", ""),
            "title": "",
            "alignment_score": 0.8,
        })
    return anchors


def _validate_control(control: ComposedControl) -> None:
    """Validate and fix control field values."""
    # Severity
    if control.severity not in VALID_SEVERITIES:
        control.severity = "medium"

    # Implementation effort
    if control.implementation_effort not in VALID_EFFORTS:
        control.implementation_effort = "m"

    # Verification method
    if control.verification_method and control.verification_method not in VALID_VERIFICATION:
        control.verification_method = None

    # Risk score
    if not (0 <= control.risk_score <= 10):
        control.risk_score = _severity_to_risk(control.severity)

    # Title length
    if len(control.title) > 255:
        control.title = control.title[:252] + "..."

    # Ensure minimum content
    if not control.objective:
        control.objective = control.title
    if not control.rationale:
        control.rationale = "Aus regulatorischer Anforderung abgeleitet."
    if not control.requirements:
        control.requirements = ["Anforderung gemaess Pflichtbeschreibung umsetzen"]
    if not control.test_procedure:
        control.test_procedure = ["Umsetzung der Anforderungen pruefen"]
    if not control.evidence:
        control.evidence = ["Dokumentation der Umsetzung"]


def _severity_to_risk(severity: str) -> float:
    """Map severity to a default risk score."""
    return {
        "critical": 9.0,
        "high": 7.0,
        "medium": 5.0,
        "low": 3.0,
    }.get(severity, 5.0)
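
# Hedged sanity check of the mapping above:
#
#     >>> _severity_to_risk("high")
#     7.0
#     >>> _severity_to_risk("unknown")  # unrecognized → the "medium" default
#     5.0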

control-pipeline/services/control_dedup.py (new file, 745 lines)
@@ -0,0 +1,745 @@
"""Control Deduplication Engine — 4-Stage Matching Pipeline.

Prevents duplicate atomic controls during Pass 0b by checking candidates
against existing controls before insertion.

Stages:
  1. Pattern-Gate: pattern_id must match (hard gate)
  2. Action-Check: normalized action verb must match (hard gate)
  3. Object-Norm: normalized object must match (soft gate with high threshold)
  4. Embedding: cosine similarity with tiered thresholds (Qdrant)

Verdicts:
  - NEW: create a new atomic control
  - LINK: add parent link to existing control (similarity > LINK_THRESHOLD)
  - REVIEW: queue for human review (REVIEW_THRESHOLD < sim < LINK_THRESHOLD)
"""

import logging
import os
import re
from dataclasses import dataclass, field
from typing import Optional, Callable, Awaitable

import httpx

logger = logging.getLogger(__name__)

# ── Configuration ────────────────────────────────────────────────────

DEDUP_ENABLED = os.getenv("DEDUP_ENABLED", "true").lower() == "true"
LINK_THRESHOLD = float(os.getenv("DEDUP_LINK_THRESHOLD", "0.92"))
REVIEW_THRESHOLD = float(os.getenv("DEDUP_REVIEW_THRESHOLD", "0.85"))
LINK_THRESHOLD_DIFF_OBJECT = float(os.getenv("DEDUP_LINK_THRESHOLD_DIFF_OBJ", "0.95"))
CROSS_REG_LINK_THRESHOLD = float(os.getenv("DEDUP_CROSS_REG_THRESHOLD", "0.95"))
QDRANT_COLLECTION = os.getenv("DEDUP_QDRANT_COLLECTION", "atomic_controls")
QDRANT_URL = os.getenv("QDRANT_URL", "http://host.docker.internal:6333")
EMBEDDING_URL = os.getenv("EMBEDDING_URL", "http://embedding-service:8087")


# ── Result Dataclass ─────────────────────────────────────────────────

@dataclass
class DedupResult:
    """Outcome of the dedup check."""
    verdict: str  # "new" | "link" | "review"
    matched_control_uuid: Optional[str] = None
    matched_control_id: Optional[str] = None
    matched_title: Optional[str] = None
    stage: str = ""  # which stage decided
    similarity_score: float = 0.0
    link_type: str = "dedup_merge"  # "dedup_merge" | "cross_regulation"
    details: dict = field(default_factory=dict)


# ── Action Normalization ─────────────────────────────────────────────

_ACTION_SYNONYMS: dict[str, str] = {
    # German → canonical English
    "implementieren": "implement",
    "umsetzen": "implement",
    "einrichten": "implement",
    "einführen": "implement",
    "aufbauen": "implement",
    "bereitstellen": "implement",
    "aktivieren": "implement",
    "konfigurieren": "configure",
    "einstellen": "configure",
    "parametrieren": "configure",
    "testen": "test",
    "prüfen": "test",
    "überprüfen": "test",
    "verifizieren": "test",
    "validieren": "test",
    "kontrollieren": "test",
    "auditieren": "audit",
    "dokumentieren": "document",
    "protokollieren": "log",
    "aufzeichnen": "log",
    "loggen": "log",
    "überwachen": "monitor",
    "monitoring": "monitor",
    "beobachten": "monitor",
    "schulen": "train",
    "trainieren": "train",
    "sensibilisieren": "train",
    "löschen": "delete",
    "entfernen": "delete",
    "verschlüsseln": "encrypt",
    "sperren": "block",
    "beschränken": "restrict",
    "einschränken": "restrict",
    "begrenzen": "restrict",
    "autorisieren": "authorize",
    "genehmigen": "authorize",
    "freigeben": "authorize",
    "authentifizieren": "authenticate",
    "identifizieren": "identify",
    "melden": "report",
    "benachrichtigen": "notify",
    "informieren": "notify",
    "aktualisieren": "update",
    "erneuern": "update",
    "sichern": "backup",
    "wiederherstellen": "restore",
    # English passthrough
    "implement": "implement",
    "configure": "configure",
    "test": "test",
    "verify": "test",
    "validate": "test",
    "audit": "audit",
    "document": "document",
    "log": "log",
    "monitor": "monitor",
    "train": "train",
    "delete": "delete",
    "encrypt": "encrypt",
    "restrict": "restrict",
    "authorize": "authorize",
    "authenticate": "authenticate",
    "report": "report",
    "update": "update",
    "backup": "backup",
    "restore": "restore",
}


def normalize_action(action: str) -> str:
    """Normalize an action verb to a canonical English form."""
    if not action:
        return ""
    action = action.strip().lower()
    # Strip German infinitive/conjugation suffixes for the lookup
    action_base = re.sub(r"(en|t|st|e|te|tet|end)$", "", action)
    # Try an exact match first, then the base form
    if action in _ACTION_SYNONYMS:
        return _ACTION_SYNONYMS[action]
    if action_base in _ACTION_SYNONYMS:
        return _ACTION_SYNONYMS[action_base]
    # Fuzzy: check whether the action starts with any known verb
    for verb, canonical in _ACTION_SYNONYMS.items():
        if action.startswith(verb) or verb.startswith(action):
            return canonical
    return action  # fallback: return as-is
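
# Hedged examples of the normalization above (derived from the synonym table
# and the suffix-stripping heuristic, not from recorded output):
#
#     >>> normalize_action("implementieren")
#     'implement'
#     >>> normalize_action("Prüfen")
#     'test'
#     >>> normalize_action("deployen")  # unknown verb falls through unchanged
#     'deployen'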


# ── Object Normalization ─────────────────────────────────────────────

_OBJECT_SYNONYMS: dict[str, str] = {
    # Authentication / Access
    "mfa": "multi_factor_auth",
    "multi-faktor-authentifizierung": "multi_factor_auth",
    "mehrfaktorauthentifizierung": "multi_factor_auth",
    "multi-factor authentication": "multi_factor_auth",
    "two-factor": "multi_factor_auth",
    "2fa": "multi_factor_auth",
    "passwort": "password_policy",
    "kennwort": "password_policy",
    "password": "password_policy",
    "zugangsdaten": "credentials",
    "credentials": "credentials",
    "admin-konten": "privileged_access",
    "admin accounts": "privileged_access",
    "administratorkonten": "privileged_access",
    "privilegierte zugriffe": "privileged_access",
    "privileged accounts": "privileged_access",
    "remote-zugriff": "remote_access",
    "fernzugriff": "remote_access",
    "remote access": "remote_access",
    "session": "session_management",
    "sitzung": "session_management",
    "sitzungsverwaltung": "session_management",
    # Encryption
    "verschlüsselung": "encryption",
    "encryption": "encryption",
    "kryptografie": "encryption",
    "kryptografische verfahren": "encryption",
    "schlüssel": "key_management",
    "key management": "key_management",
    "schlüsselverwaltung": "key_management",
    "zertifikat": "certificate_management",
    "certificate": "certificate_management",
    "tls": "transport_encryption",
    "ssl": "transport_encryption",
    "https": "transport_encryption",
    # Network
    "firewall": "firewall",
    "netzwerk": "network_security",
    "network": "network_security",
    "vpn": "vpn",
    "segmentierung": "network_segmentation",
    "segmentation": "network_segmentation",
    # Logging / Monitoring
    "audit-log": "audit_logging",
    "audit log": "audit_logging",
    "protokoll": "audit_logging",
    "logging": "audit_logging",
    "monitoring": "monitoring",
    "überwachung": "monitoring",
    "alerting": "alerting",
    "alarmierung": "alerting",
    "siem": "siem",
    # Data
    "personenbezogene daten": "personal_data",
    "personal data": "personal_data",
    "sensible daten": "sensitive_data",
    "sensitive data": "sensitive_data",
    "datensicherung": "backup",
    "backup": "backup",
    "wiederherstellung": "disaster_recovery",
    "disaster recovery": "disaster_recovery",
    # Policy / Process
    "richtlinie": "policy",
    "policy": "policy",
    "verfahrensanweisung": "procedure",
    "procedure": "procedure",
    "prozess": "process",
    "schulung": "training",
    "training": "training",
    "awareness": "awareness",
    "sensibilisierung": "awareness",
    # Incident
    "vorfall": "incident",
    "incident": "incident",
    "sicherheitsvorfall": "security_incident",
    "security incident": "security_incident",
    # Vulnerability
    "schwachstelle": "vulnerability",
    "vulnerability": "vulnerability",
    "patch": "patch_management",
    "update": "patch_management",
    "patching": "patch_management",
}

# Pre-sorted key list for substring matching (longest phrase first, so the
# most specific synonym wins)
_OBJECT_KEYS_SORTED = sorted(_OBJECT_SYNONYMS.keys(), key=len, reverse=True)


def normalize_object(obj: str) -> str:
    """Normalize a compliance object to a canonical token."""
    if not obj:
        return ""
    obj_lower = obj.strip().lower()
    # Exact match
    if obj_lower in _OBJECT_SYNONYMS:
        return _OBJECT_SYNONYMS[obj_lower]
    # Substring match (longest first)
    for phrase in _OBJECT_KEYS_SORTED:
        if phrase in obj_lower:
            return _OBJECT_SYNONYMS[phrase]
    # Fallback: strip articles/prepositions, join with underscores
    cleaned = re.sub(r"\b(der|die|das|den|dem|des|ein|eine|eines|einem|einen"
                     r"|für|von|zu|auf|in|an|bei|mit|nach|über|unter|the|a|an"
                     r"|for|of|to|on|in|at|by|with)\b", "", obj_lower)
    tokens = [t for t in cleaned.split() if len(t) > 2]
    return "_".join(tokens[:4]) if tokens else obj_lower.replace(" ", "_")
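
# Hedged examples (a synonym hit, a substring hit, and the token fallback —
# derived from the table and regex above, not from recorded output):
#
#     >>> normalize_object("MFA")
#     'multi_factor_auth'
#     >>> normalize_object("Einsatz von Verschlüsselung")
#     'encryption'
#     >>> normalize_object("Meldewege im Unternehmen")  # no synonym → tokens
#     'meldewege_unternehmen'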


# ── Canonicalization ─────────────────────────────────────────────────

def canonicalize_text(action: str, obj: str, title: str = "") -> str:
    """Build a canonical English text for embedding.

    Transforms German compliance text into normalized English tokens
    for more stable embedding comparisons.
    """
    norm_action = normalize_action(action)
    norm_object = normalize_object(obj)
    # Build the canonical sentence
    parts = [norm_action, norm_object]
    if title:
        # Add title keywords (stripped of common filler)
        title_clean = re.sub(
            r"\b(und|oder|für|von|zu|der|die|das|den|dem|des|ein|eine"
            r"|bei|mit|nach|gemäß|gem\.|laut|entsprechend)\b",
            "", title.lower()
        )
        title_tokens = [t for t in title_clean.split() if len(t) > 3][:5]
        if title_tokens:
            parts.append("for")
            parts.extend(title_tokens)
    return " ".join(parts)
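
# Hedged end-to-end example combining the three helpers above (derived from
# their code, not from recorded output):
#
#     >>> canonicalize_text("implementieren", "MFA", "MFA für Admin-Konten")
#     'implement multi_factor_auth for admin-konten'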


# ── Embedding Helper ─────────────────────────────────────────────────

async def get_embedding(text: str) -> list[float]:
    """Get the embedding vector for a single text via the embedding service."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(
                f"{EMBEDDING_URL}/embed",
                json={"texts": [text]},
            )
            embeddings = resp.json().get("embeddings", [])
            return embeddings[0] if embeddings else []
    except Exception as e:
        logger.warning("Embedding failed: %s", e)
        return []


def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Compute the cosine similarity between two vectors."""
    if not a or not b or len(a) != len(b):
        return 0.0
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(x * x for x in b) ** 0.5
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot / (norm_a * norm_b)
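
# Hedged sanity checks for the pure-Python cosine above:
#
#     >>> cosine_similarity([1.0, 0.0], [1.0, 0.0])
#     1.0
#     >>> cosine_similarity([1.0, 0.0], [0.0, 1.0])
#     0.0
#     >>> cosine_similarity([], [1.0])  # mismatched/empty vectors are guarded
#     0.0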


# ── Qdrant Helpers ───────────────────────────────────────────────────

async def qdrant_search(
    embedding: list[float],
    pattern_id: str,
    top_k: int = 10,
    collection: Optional[str] = None,
) -> list[dict]:
    """Search Qdrant for similar atomic controls, filtered by pattern_id."""
    if not embedding:
        return []
    coll = collection or QDRANT_COLLECTION
    body: dict = {
        "vector": embedding,
        "limit": top_k,
        "with_payload": True,
        "filter": {
            "must": [
                {"key": "pattern_id", "match": {"value": pattern_id}}
            ]
        },
    }
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(
                f"{QDRANT_URL}/collections/{coll}/points/search",
                json=body,
            )
            if resp.status_code != 200:
                logger.warning("Qdrant search failed: %d", resp.status_code)
                return []
            return resp.json().get("result", [])
    except Exception as e:
        logger.warning("Qdrant search error: %s", e)
        return []


async def qdrant_search_cross_regulation(
    embedding: list[float],
    top_k: int = 5,
    collection: Optional[str] = None,
) -> list[dict]:
    """Search Qdrant for similar controls across ALL regulations (no pattern_id filter).

    Used for cross-regulation linking (e.g. DSGVO Art. 25 ↔ NIS2 Art. 21).
    """
    if not embedding:
        return []
    coll = collection or QDRANT_COLLECTION
    body: dict = {
        "vector": embedding,
        "limit": top_k,
        "with_payload": True,
    }
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(
                f"{QDRANT_URL}/collections/{coll}/points/search",
                json=body,
            )
            if resp.status_code != 200:
                logger.warning("Qdrant cross-reg search failed: %d", resp.status_code)
                return []
            return resp.json().get("result", [])
    except Exception as e:
        logger.warning("Qdrant cross-reg search error: %s", e)
        return []


async def qdrant_upsert(
    point_id: str,
    embedding: list[float],
    payload: dict,
    collection: Optional[str] = None,
) -> bool:
    """Upsert a single point into a Qdrant collection."""
    if not embedding:
        return False
    coll = collection or QDRANT_COLLECTION
    body = {
        "points": [{
            "id": point_id,
            "vector": embedding,
            "payload": payload,
        }]
    }
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.put(
                f"{QDRANT_URL}/collections/{coll}/points",
                json=body,
            )
            return resp.status_code == 200
    except Exception as e:
        logger.warning("Qdrant upsert error: %s", e)
        return False


async def ensure_qdrant_collection(
    vector_size: int = 1024,
    collection: Optional[str] = None,
) -> bool:
    """Create a Qdrant collection if it doesn't exist (idempotent)."""
    coll = collection or QDRANT_COLLECTION
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            # Check whether it already exists
            resp = await client.get(f"{QDRANT_URL}/collections/{coll}")
            if resp.status_code == 200:
                return True
            # Create it
            resp = await client.put(
                f"{QDRANT_URL}/collections/{coll}",
                json={
                    "vectors": {"size": vector_size, "distance": "Cosine"},
                },
            )
            if resp.status_code == 200:
                logger.info("Created Qdrant collection: %s", coll)
                # Create payload indexes
                for field_name in ["pattern_id", "action_normalized", "object_normalized", "control_id"]:
                    await client.put(
                        f"{QDRANT_URL}/collections/{coll}/index",
                        json={"field_name": field_name, "field_schema": "keyword"},
                    )
                return True
            logger.error("Failed to create Qdrant collection: %d", resp.status_code)
            return False
    except Exception as e:
        logger.warning("Qdrant collection check error: %s", e)
        return False
|
||||
|
||||
|
||||
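
Taken together, the helpers above form a small index-and-search flow. A minimal sketch, assuming a reachable Qdrant instance and the module-level QDRANT_URL / QDRANT_COLLECTION settings; the point ID and payload values are made up:

import asyncio

async def _demo() -> None:
    await ensure_qdrant_collection(vector_size=1024)  # idempotent create
    vec = await get_embedding("Zugriffskontrollen definieren")
    if vec:
        await qdrant_upsert("11111111-1111-1111-1111-111111111111", vec,
                            payload={"pattern_id": "P-ACCESS", "control_id": "AC-01"})
        hits = await qdrant_search(vec, pattern_id="P-ACCESS", top_k=3)
        print([h.get("score") for h in hits])

asyncio.run(_demo())
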
# ── Main Dedup Checker ───────────────────────────────────────────────

class ControlDedupChecker:
    """4-stage dedup checker for atomic controls.

    Usage:
        checker = ControlDedupChecker(db_session)
        result = await checker.check_duplicate(candidate_action, candidate_object, candidate_title, pattern_id)
        if result.verdict == "link":
            checker.add_parent_link(result.matched_control_uuid, parent_uuid)
        elif result.verdict == "review":
            checker.write_review(candidate, result)
        else:
            # Insert new control
    """

    def __init__(
        self,
        db,
        embed_fn: Optional[Callable[[str], Awaitable[list[float]]]] = None,
        search_fn: Optional[Callable] = None,
    ):
        self.db = db
        self._embed = embed_fn or get_embedding
        self._search = search_fn or qdrant_search
        self._cache: dict[str, list[dict]] = {}  # pattern_id → existing controls

    def _load_existing(self, pattern_id: str) -> list[dict]:
        """Load existing atomic controls with the same pattern_id from the DB."""
        if pattern_id in self._cache:
            return self._cache[pattern_id]
        from sqlalchemy import text
        rows = self.db.execute(text("""
            SELECT id::text, control_id, title, objective,
                   pattern_id,
                   generation_metadata->>'obligation_type' as obligation_type
            FROM canonical_controls
            WHERE parent_control_uuid IS NOT NULL
              AND release_state != 'deprecated'
              AND pattern_id = :pid
        """), {"pid": pattern_id}).fetchall()
        result = [
            {
                "uuid": r[0], "control_id": r[1], "title": r[2],
                "objective": r[3], "pattern_id": r[4],
                "obligation_type": r[5],
            }
            for r in rows
        ]
        self._cache[pattern_id] = result
        return result

    async def check_duplicate(
        self,
        action: str,
        obj: str,
        title: str,
        pattern_id: Optional[str],
    ) -> DedupResult:
        """Run the 4-stage dedup pipeline plus cross-regulation linking.

        Returns a DedupResult with verdict: new/link/review.
        """
        # No pattern_id → can't dedup meaningfully
        if not pattern_id:
            return DedupResult(verdict="new", stage="no_pattern")

        # Stage 1: Pattern gate
        existing = self._load_existing(pattern_id)
        if not existing:
            return DedupResult(
                verdict="new", stage="pattern_gate",
                details={"reason": "no existing controls with this pattern_id"},
            )

        # Stage 2: Action check
        norm_action = normalize_action(action)
        # The action is not stored directly on existing controls in the DB,
        # so controls that pass the pattern gate are compared via embeddings;
        # generation_metadata can supply the action where it is available.

        # Stage 3: Object normalization
        norm_object = normalize_object(obj)

        # Stage 4: Embedding similarity
        canonical = canonicalize_text(action, obj, title)
        embedding = await self._embed(canonical)
        if not embedding:
            # Can't compute an embedding → default to new
            return DedupResult(
                verdict="new", stage="embedding_unavailable",
                details={"canonical_text": canonical},
            )

        # Search Qdrant
        results = await self._search(embedding, pattern_id, top_k=5)

        if not results:
            # No intra-pattern matches → try cross-regulation
            return await self._check_cross_regulation(embedding, DedupResult(
                verdict="new", stage="no_qdrant_matches",
                details={"canonical_text": canonical, "action": norm_action, "object": norm_object},
            ))

        # Evaluate the best match
        best = results[0]
        best_score = best.get("score", 0.0)
        best_payload = best.get("payload", {})
        best_action = best_payload.get("action_normalized", "")
        best_object = best_payload.get("object_normalized", "")

        # Action differs → NEW (even if the embedding similarity is high)
        if best_action and norm_action and best_action != norm_action:
            return await self._check_cross_regulation(embedding, DedupResult(
                verdict="new", stage="action_mismatch",
                similarity_score=best_score,
                matched_control_id=best_payload.get("control_id"),
                details={
                    "candidate_action": norm_action,
                    "existing_action": best_action,
                    "similarity": best_score,
                },
            ))

        # Object differs → use the higher threshold
        if best_object and norm_object and best_object != norm_object:
            if best_score > LINK_THRESHOLD_DIFF_OBJECT:
                return DedupResult(
                    verdict="link", stage="embedding_diff_object",
                    matched_control_uuid=best_payload.get("control_uuid"),
                    matched_control_id=best_payload.get("control_id"),
                    matched_title=best_payload.get("title"),
                    similarity_score=best_score,
                    details={"candidate_object": norm_object, "existing_object": best_object},
                )
            return await self._check_cross_regulation(embedding, DedupResult(
                verdict="new", stage="object_mismatch_below_threshold",
                similarity_score=best_score,
                matched_control_id=best_payload.get("control_id"),
                details={
                    "candidate_object": norm_object,
                    "existing_object": best_object,
                    "threshold": LINK_THRESHOLD_DIFF_OBJECT,
                },
            ))

        # Same action + same object → tiered thresholds
        if best_score > LINK_THRESHOLD:
            return DedupResult(
                verdict="link", stage="embedding_match",
                matched_control_uuid=best_payload.get("control_uuid"),
                matched_control_id=best_payload.get("control_id"),
                matched_title=best_payload.get("title"),
                similarity_score=best_score,
            )
        if best_score > REVIEW_THRESHOLD:
            return DedupResult(
                verdict="review", stage="embedding_review",
                matched_control_uuid=best_payload.get("control_uuid"),
                matched_control_id=best_payload.get("control_id"),
                matched_title=best_payload.get("title"),
                similarity_score=best_score,
            )
        return await self._check_cross_regulation(embedding, DedupResult(
            verdict="new", stage="embedding_below_threshold",
            similarity_score=best_score,
            details={"threshold": REVIEW_THRESHOLD},
        ))
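
The tiered verdicts are easiest to see with concrete numbers. The sketch below uses placeholder thresholds, not the module's real constants (those are defined earlier in this file):

# Illustrative only; placeholder threshold values:
#   LINK_THRESHOLD_DIFF_OBJECT = 0.92   (same action, different object → link)
#   LINK_THRESHOLD             = 0.88   (same action, same object → link)
#   REVIEW_THRESHOLD           = 0.80   (same action, same object → human review)
#
# score 0.95, action match, object differs → "link"   (> 0.92)
# score 0.90, action match, object match   → "link"   (> 0.88)
# score 0.85, action match, object match   → "review" (0.80 < s <= 0.88)
# score 0.70, any combination              → "new", then the cross-regulation pass
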
    async def _check_cross_regulation(
        self,
        embedding: list[float],
        intra_result: DedupResult,
    ) -> DedupResult:
        """Second pass: cross-regulation linking for controls deemed 'new'.

        Searches Qdrant WITHOUT the pattern_id filter. Uses a higher threshold
        (0.95) to avoid false positives across regulation boundaries.
        """
        if intra_result.verdict != "new" or not embedding:
            return intra_result

        cross_results = await qdrant_search_cross_regulation(embedding, top_k=5)
        if not cross_results:
            return intra_result

        best = cross_results[0]
        best_score = best.get("score", 0.0)
        if best_score > CROSS_REG_LINK_THRESHOLD:
            best_payload = best.get("payload", {})
            return DedupResult(
                verdict="link",
                stage="cross_regulation",
                matched_control_uuid=best_payload.get("control_uuid"),
                matched_control_id=best_payload.get("control_id"),
                matched_title=best_payload.get("title"),
                similarity_score=best_score,
                link_type="cross_regulation",
                details={
                    "cross_reg_score": best_score,
                    "cross_reg_threshold": CROSS_REG_LINK_THRESHOLD,
                },
            )

        return intra_result

    def add_parent_link(
        self,
        control_uuid: str,
        parent_control_uuid: str,
        link_type: str = "dedup_merge",
        confidence: float = 0.0,
        source_regulation: Optional[str] = None,
        source_article: Optional[str] = None,
        obligation_candidate_id: Optional[str] = None,
    ) -> None:
        """Add a parent link to an existing atomic control."""
        from sqlalchemy import text
        self.db.execute(text("""
            INSERT INTO control_parent_links
                (control_uuid, parent_control_uuid, link_type, confidence,
                 source_regulation, source_article, obligation_candidate_id)
            VALUES (:cu, :pu, :lt, :conf, :sr, :sa, :oci::uuid)
            ON CONFLICT (control_uuid, parent_control_uuid) DO NOTHING
        """), {
            "cu": control_uuid,
            "pu": parent_control_uuid,
            "lt": link_type,
            "conf": confidence,
            "sr": source_regulation,
            "sa": source_article,
            "oci": obligation_candidate_id,
        })
        self.db.commit()

    def write_review(
        self,
        candidate_control_id: str,
        candidate_title: str,
        candidate_objective: str,
        result: DedupResult,
        parent_control_uuid: Optional[str] = None,
        obligation_candidate_id: Optional[str] = None,
    ) -> None:
        """Write a dedup review queue entry."""
        import json
        from sqlalchemy import text
        self.db.execute(text("""
            INSERT INTO control_dedup_reviews
                (candidate_control_id, candidate_title, candidate_objective,
                 matched_control_uuid, matched_control_id,
                 similarity_score, dedup_stage, dedup_details,
                 parent_control_uuid, obligation_candidate_id)
            VALUES (:ccid, :ct, :co, :mcu::uuid, :mci, :ss, :ds,
                    :dd::jsonb, :pcu::uuid, :oci)
        """), {
            "ccid": candidate_control_id,
            "ct": candidate_title,
            "co": candidate_objective,
            "mcu": result.matched_control_uuid,
            "mci": result.matched_control_id,
            "ss": result.similarity_score,
            "ds": result.stage,
            "dd": json.dumps(result.details),
            "pcu": parent_control_uuid,
            "oci": obligation_candidate_id,
        })
        self.db.commit()

    async def index_control(
        self,
        control_uuid: str,
        control_id: str,
        title: str,
        action: str,
        obj: str,
        pattern_id: str,
        collection: Optional[str] = None,
    ) -> bool:
        """Index a new atomic control in Qdrant for future dedup checks."""
        norm_action = normalize_action(action)
        norm_object = normalize_object(obj)
        canonical = canonicalize_text(action, obj, title)
        embedding = await self._embed(canonical)
        if not embedding:
            return False
        return await qdrant_upsert(
            point_id=control_uuid,
            embedding=embedding,
            payload={
                "control_uuid": control_uuid,
                "control_id": control_id,
                "title": title,
                "pattern_id": pattern_id,
                "action_normalized": norm_action,
                "object_normalized": norm_object,
                "canonical_text": canonical,
            },
            collection=collection,
        )
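
End to end, a caller checks first and indexes only genuinely new controls. A minimal sketch following the class docstring; the candidate dict and parent_uuid are assumed to come from the surrounding pipeline:

async def handle_candidate(db_session, candidate, parent_uuid):
    # Hedged sketch; candidate keys are assumptions, not the pipeline's real schema.
    checker = ControlDedupChecker(db_session)
    result = await checker.check_duplicate(
        candidate["action"], candidate["object"],
        candidate["title"], candidate["pattern_id"],
    )
    if result.verdict == "link":
        checker.add_parent_link(result.matched_control_uuid, parent_uuid,
                                confidence=result.similarity_score or 0.0)
    elif result.verdict == "review":
        checker.write_review(candidate["control_id"], candidate["title"],
                             candidate["objective"], result,
                             parent_control_uuid=parent_uuid)
    else:
        # Insert the new control, then make it findable for the next run:
        await checker.index_control(candidate["uuid"], candidate["control_id"],
                                    candidate["title"], candidate["action"],
                                    candidate["object"], candidate["pattern_id"])
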
2431 control-pipeline/services/control_generator.py (Normal file)
File diff suppressed because it is too large.
154 control-pipeline/services/control_status_machine.py (Normal file)
@@ -0,0 +1,154 @@
"""
Control Status Transition State Machine.

Enforces that controls cannot be set to "pass" without sufficient evidence.
Prevents compliance theater, where controls claim compliance without real proof.

Transition rules:
    planned → in_progress : always allowed
    in_progress → pass    : requires ≥1 evidence with confidence ≥ E2 and
                            a truth_status in VALID_TRUTH_STATUSES
    in_progress → partial : requires ≥1 evidence (any level)
    pass → fail           : always allowed (degradation)
    any → n/a             : requires status_justification
    any → planned         : always allowed (reset)
"""

from typing import Any, List, Optional, Tuple

# EvidenceDB is an ORM model from compliance — we only need duck-typed objects
# with .confidence_level and .truth_status attributes.
EvidenceDB = Any


# Confidence level ordering for comparisons
CONFIDENCE_ORDER = {"E0": 0, "E1": 1, "E2": 2, "E3": 3, "E4": 4}

# Truth statuses that qualify as "real" evidence for pass transitions
VALID_TRUTH_STATUSES = {"uploaded", "observed", "validated_internal", "accepted_by_auditor", "provided_to_auditor"}


def _has_qualifying_evidence(evidence_list: List[EvidenceDB]) -> bool:
    """Return True if at least one evidence record has confidence >= E2 and a qualifying truth_status."""
    for e in evidence_list:
        conf = getattr(e, "confidence_level", None)
        truth = getattr(e, "truth_status", None)
        # Get string values from an enum member or a plain string
        conf_val = conf.value if hasattr(conf, "value") else str(conf) if conf else "E1"
        truth_val = truth.value if hasattr(truth, "value") else str(truth) if truth else "uploaded"
        if CONFIDENCE_ORDER.get(conf_val, 1) >= CONFIDENCE_ORDER["E2"] and truth_val in VALID_TRUTH_STATUSES:
            return True
    return False


def validate_transition(
    current_status: str,
    new_status: str,
    evidence_list: Optional[List[EvidenceDB]] = None,
    status_justification: Optional[str] = None,
    bypass_for_auto_updater: bool = False,
) -> Tuple[bool, List[str]]:
    """
    Validate whether a control status transition is allowed.

    Args:
        current_status: Current control status value (e.g. "planned", "pass")
        new_status: Requested new status
        evidence_list: List of EvidenceDB objects linked to this control
        status_justification: Text justification (required for n/a transitions)
        bypass_for_auto_updater: If True, skip evidence checks (used by the CI/CD
            auto-updater, which creates evidence atomically with the status change)

    Returns:
        Tuple of (allowed: bool, violations: list[str])
    """
    violations: List[str] = []
    evidence_list = evidence_list or []

    # Same status → no-op, always allowed
    if current_status == new_status:
        return True, []

    # Reset to planned is always allowed
    if new_status == "planned":
        return True, []

    # n/a requires justification
    if new_status == "n/a":
        if not status_justification or not status_justification.strip():
            violations.append("Transition to 'n/a' requires a status_justification explaining why this control is not applicable.")
        return len(violations) == 0, violations

    # Degradation: pass → fail is always allowed
    if current_status == "pass" and new_status == "fail":
        return True, []

    # planned → in_progress: always allowed
    if current_status == "planned" and new_status == "in_progress":
        return True, []

    # → partial: needs at least 1 evidence record
    if new_status == "partial":
        if not bypass_for_auto_updater and len(evidence_list) == 0:
            violations.append("Transition to 'partial' requires at least 1 evidence record.")
        return len(violations) == 0, violations

    # planned/fail → pass must go through in_progress first.
    # (This guard must run BEFORE the generic pass check below, otherwise it is unreachable.)
    if current_status in ("planned", "fail") and new_status == "pass":
        if bypass_for_auto_updater:
            return True, []
        violations.append(
            f"Direct transition from '{current_status}' to 'pass' is not allowed. "
            f"Move to 'in_progress' first, then to 'pass' with qualifying evidence."
        )
        return False, violations

    # in_progress/partial → pass: strict requirements
    if new_status == "pass":
        if bypass_for_auto_updater:
            return True, []

        if len(evidence_list) == 0:
            violations.append("Transition to 'pass' requires at least 1 evidence record.")
            return False, violations

        if not _has_qualifying_evidence(evidence_list):
            violations.append(
                "Transition to 'pass' requires at least 1 evidence with confidence >= E2 "
                "and truth_status in (uploaded, observed, validated_internal, "
                "accepted_by_auditor, provided_to_auditor). "
                "Current evidence does not meet this threshold."
            )
        return len(violations) == 0, violations

    # → fail: always allowed
    if new_status == "fail":
        return True, []

    # All other transitions allowed
    return True, []
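
A quick sketch of how the gate behaves, using a duck-typed stand-in for the Evidence ORM model (only the two checked attributes matter):

from dataclasses import dataclass

@dataclass
class Evidence:  # stand-in for the real ORM model
    confidence_level: str
    truth_status: str

ok, why = validate_transition("in_progress", "pass", evidence_list=[])
print(ok, why)  # False, ["Transition to 'pass' requires at least 1 evidence record."]

ok, why = validate_transition(
    "in_progress", "pass",
    evidence_list=[Evidence(confidence_level="E3", truth_status="validated_internal")],
)
print(ok, why)  # True, []
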
3932 control-pipeline/services/decomposition_pass.py (Normal file)
File diff suppressed because it is too large.
714 control-pipeline/services/framework_decomposition.py (Normal file)
@@ -0,0 +1,714 @@
"""Framework Decomposition Engine — decomposes framework-container obligations.

Sits between Pass 0a (obligation extraction) and Pass 0b (atomic control
composition). Detects obligations that reference a framework domain (e.g.
"CCM-Praktiken fuer AIS") and decomposes them into concrete sub-obligations
using an internal framework registry.

Three routing types:
    atomic              → pass through to Pass 0b unchanged
    compound            → split compound verbs, then Pass 0b
    framework_container → decompose via registry, then Pass 0b

The registry is a set of JSON files under compliance/data/frameworks/.
"""

import json
import logging
import os
import re
import uuid
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Registry loading
# ---------------------------------------------------------------------------

_REGISTRY_DIR = Path(__file__).resolve().parent.parent / "data" / "frameworks"
_REGISTRY: dict[str, dict] = {}  # framework_id → framework dict


def _load_registry() -> dict[str, dict]:
    """Load all framework JSON files from the registry directory."""
    registry: dict[str, dict] = {}
    if not _REGISTRY_DIR.is_dir():
        logger.warning("Framework registry dir not found: %s", _REGISTRY_DIR)
        return registry

    for fpath in sorted(_REGISTRY_DIR.glob("*.json")):
        try:
            with open(fpath, encoding="utf-8") as f:
                fw = json.load(f)
            fw_id = fw.get("framework_id", fpath.stem)
            registry[fw_id] = fw
            logger.info(
                "Loaded framework: %s (%d domains)",
                fw_id,
                len(fw.get("domains", [])),
            )
        except Exception:
            logger.exception("Failed to load framework file: %s", fpath)
    return registry


def get_registry() -> dict[str, dict]:
    """Return the global framework registry (lazy-loaded)."""
    global _REGISTRY
    if not _REGISTRY:
        _REGISTRY = _load_registry()
    return _REGISTRY


def reload_registry() -> dict[str, dict]:
    """Force-reload the framework registry from disk."""
    global _REGISTRY
    _REGISTRY = _load_registry()
    return _REGISTRY
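
The registry format is implied by the fields the loader and matchers read (framework_id, display_name, domains, domain_id, title, aliases, keywords, and subcontrols with subcontrol_id, statement, action_hint, object_hint, object_class). A hedged sketch of one entry under compliance/data/frameworks/, written as a Python dict; every ID and text value below is invented for illustration:

# Hypothetical registry entry (e.g. data/frameworks/csa_ccm.json); all values invented.
EXAMPLE_FRAMEWORK = {
    "framework_id": "csa_ccm",
    "display_name": "CSA Cloud Controls Matrix",
    "domains": [
        {
            "domain_id": "AIS",
            "title": "Application & Interface Security",
            "aliases": ["Application Security", "Anwendungssicherheit"],
            "keywords": ["application", "interface", "api"],
            "subcontrols": [
                {
                    "subcontrol_id": "AIS-01",
                    "title": "Application Security Policy",
                    "statement": "Richtlinien fuer Anwendungssicherheit muessen definiert und dokumentiert werden.",
                    "action_hint": "definieren",
                    "object_hint": "Richtlinien fuer Anwendungssicherheit",
                    "object_class": "policy",
                    "keywords": ["richtlinie", "policy", "anwendung"],
                },
            ],
        },
    ],
}
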
# ---------------------------------------------------------------------------
# Framework alias index (built from registry)
# ---------------------------------------------------------------------------

def _build_alias_index(registry: dict[str, dict]) -> dict[str, str]:
    """Build a lowercase alias → framework_id lookup."""
    idx: dict[str, str] = {}
    for fw_id, fw in registry.items():
        # Framework-level aliases
        idx[fw_id.lower()] = fw_id
        name = fw.get("display_name", "")
        if name:
            idx[name.lower()] = fw_id
        # Common short forms
        for part in fw_id.lower().replace("_", " ").split():
            if len(part) >= 3:
                idx[part] = fw_id
    return idx


# ---------------------------------------------------------------------------
# Routing — classify obligation type
# ---------------------------------------------------------------------------

# Extended patterns for framework detection (beyond the simple _COMPOSITE_RE
# in decomposition_pass.py — here we also capture the framework name)
_FRAMEWORK_PATTERN = re.compile(
    r"(?:praktiken|kontrollen|ma(?:ss|ß)nahmen|anforderungen|vorgaben|controls|practices|measures|requirements)"
    r"\s+(?:f(?:ue|ü)r|aus|gem(?:ae|ä)(?:ss|ß)|nach|from|of|for|per)\s+"
    r"(.+?)(?:\s+(?:m(?:ue|ü)ssen|sollen|sind|werden|implementieren|umsetzen|einf(?:ue|ü)hren)|\.|,|$)",
    re.IGNORECASE,
)

# Direct framework name references
_DIRECT_FRAMEWORK_RE = re.compile(
    r"\b(?:CSA\s*CCM|NIST\s*(?:SP\s*)?800-53|OWASP\s*(?:ASVS|SAMM|Top\s*10)"
    r"|CIS\s*Controls|BSI\s*(?:IT-)?Grundschutz|ENISA|ISO\s*2700[12]"
    r"|COBIT|SOX|PCI\s*DSS|HITRUST|SOC\s*2|KRITIS)\b",
    re.IGNORECASE,
)

# Compound verb patterns (multiple main verbs)
_COMPOUND_VERB_RE = re.compile(
    r"\b(?:und|sowie|als\s+auch|or|and)\b",
    re.IGNORECASE,
)

# No-split phrases that look compound but aren't
_NO_SPLIT_PHRASES = [
    "pflegen und aufrechterhalten",
    "dokumentieren und pflegen",
    "definieren und dokumentieren",
    "erstellen und freigeben",
    "pruefen und genehmigen",
    "identifizieren und bewerten",
    "erkennen und melden",
    "define and maintain",
    "create and maintain",
    "establish and maintain",
    "monitor and review",
    "detect and respond",
]


@dataclass
class RoutingResult:
    """Result of obligation routing classification."""
    routing_type: str  # atomic | compound | framework_container | unknown_review
    framework_ref: Optional[str] = None
    framework_domain: Optional[str] = None
    domain_title: Optional[str] = None
    confidence: float = 0.0
    reason: str = ""


def classify_routing(
    obligation_text: str,
    action_raw: str,
    object_raw: str,
    condition_raw: Optional[str] = None,
) -> RoutingResult:
    """Classify an obligation into atomic / compound / framework_container."""
    # --- Step 1: Framework container detection ---
    fw_result = _detect_framework(obligation_text, object_raw)
    if fw_result.routing_type == "framework_container":
        return fw_result

    # --- Step 2: Compound verb detection ---
    if _is_compound_obligation(action_raw, obligation_text):
        return RoutingResult(
            routing_type="compound",
            confidence=0.7,
            reason="multiple_main_verbs",
        )

    # --- Step 3: Default = atomic ---
    return RoutingResult(
        routing_type="atomic",
        confidence=0.9,
        reason="single_action_single_object",
    )
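
Two illustrative routings, assuming a registry entry like the sketch above is loaded; without it, the first call would fall back to a lower-confidence or atomic result:

# Illustrative calls; the registry contents determine the exact framework match:
r1 = classify_routing(
    "Die Organisation muss CCM-Praktiken fuer AIS umsetzen.",
    action_raw="umsetzen",
    object_raw="CCM-Praktiken fuer AIS",
)
print(r1.routing_type)  # "framework_container" (if a matching registry file exists)

r2 = classify_routing(
    "Risiken identifizieren und Vorfaelle melden.",
    action_raw="identifizieren und melden",
    object_raw="Risiken und Vorfaelle",
)
print(r2.routing_type)  # "compound" (two main verbs, no no-split phrase)
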
def _detect_framework(
    obligation_text: str, object_raw: str,
) -> RoutingResult:
    """Detect if obligation references a framework domain."""
    combined = f"{obligation_text} {object_raw}"
    registry = get_registry()
    alias_idx = _build_alias_index(registry)

    # Strategy 1: direct framework name match
    m = _DIRECT_FRAMEWORK_RE.search(combined)
    if m:
        fw_name = m.group(0).strip()
        fw_id = _resolve_framework_id(fw_name, alias_idx, registry)
        if fw_id:
            domain_id, domain_title = _match_domain(
                combined, registry[fw_id],
            )
            return RoutingResult(
                routing_type="framework_container",
                framework_ref=fw_id,
                framework_domain=domain_id,
                domain_title=domain_title,
                confidence=0.95 if domain_id else 0.75,
                reason=f"direct_framework_match:{fw_name}",
            )
        else:
            # Framework name recognized but not in registry
            return RoutingResult(
                routing_type="framework_container",
                framework_ref=None,
                framework_domain=None,
                confidence=0.6,
                reason=f"direct_framework_match_no_registry:{fw_name}",
            )

    # Strategy 2: pattern match ("Praktiken fuer X")
    m2 = _FRAMEWORK_PATTERN.search(combined)
    if m2:
        ref_text = m2.group(1).strip()
        fw_id, domain_id, domain_title = _resolve_from_ref_text(
            ref_text, registry, alias_idx,
        )
        if fw_id:
            return RoutingResult(
                routing_type="framework_container",
                framework_ref=fw_id,
                framework_domain=domain_id,
                domain_title=domain_title,
                confidence=0.85 if domain_id else 0.65,
                reason=f"pattern_match:{ref_text}",
            )

    # Strategy 3: keyword-heavy object
    if _has_framework_keywords(object_raw):
        return RoutingResult(
            routing_type="framework_container",
            framework_ref=None,
            framework_domain=None,
            confidence=0.5,
            reason="framework_keywords_in_object",
        )

    return RoutingResult(routing_type="atomic", confidence=0.0)


def _resolve_framework_id(
    name: str,
    alias_idx: dict[str, str],
    registry: dict[str, dict],
) -> Optional[str]:
    """Resolve a framework name to its registry ID."""
    normalized = re.sub(r"\s+", " ", name.strip().lower())
    # Direct alias match
    if normalized in alias_idx:
        return alias_idx[normalized]
    # Try compact form (strip spaces, hyphens, underscores)
    compact = re.sub(r"[\s_\-]+", "", normalized)
    for alias, fw_id in alias_idx.items():
        if re.sub(r"[\s_\-]+", "", alias) == compact:
            return fw_id
    # Substring match in display names
    for fw_id, fw in registry.items():
        display = fw.get("display_name", "").lower()
        if normalized in display or display in normalized:
            return fw_id
    # Partial match: check if normalized contains any alias (for multi-word refs)
    for alias, fw_id in alias_idx.items():
        if len(alias) >= 4 and alias in normalized:
            return fw_id
    return None


def _match_domain(
    text: str, framework: dict,
) -> tuple[Optional[str], Optional[str]]:
    """Match a domain within a framework from text references."""
    text_lower = text.lower()
    best_id: Optional[str] = None
    best_title: Optional[str] = None
    best_score = 0

    for domain in framework.get("domains", []):
        score = 0
        domain_id = domain["domain_id"]
        title = domain.get("title", "")

        # Exact domain ID match (e.g. "AIS")
        if re.search(rf"\b{re.escape(domain_id)}\b", text, re.IGNORECASE):
            score += 10

        # Full title match
        if title.lower() in text_lower:
            score += 8

        # Alias match
        for alias in domain.get("aliases", []):
            if alias.lower() in text_lower:
                score += 6
                break

        # Keyword overlap
        kw_hits = sum(
            1 for kw in domain.get("keywords", [])
            if kw.lower() in text_lower
        )
        score += kw_hits

        if score > best_score:
            best_score = score
            best_id = domain_id
            best_title = title

    if best_score >= 3:
        return best_id, best_title
    return None, None


def _resolve_from_ref_text(
    ref_text: str,
    registry: dict[str, dict],
    alias_idx: dict[str, str],
) -> tuple[Optional[str], Optional[str], Optional[str]]:
    """Resolve framework + domain from a reference text like 'AIS' or 'Application Security'."""
    ref_lower = ref_text.lower()

    for fw_id, fw in registry.items():
        for domain in fw.get("domains", []):
            # Check domain ID
            if domain["domain_id"].lower() in ref_lower:
                return fw_id, domain["domain_id"], domain.get("title")
            # Check title
            if domain.get("title", "").lower() in ref_lower:
                return fw_id, domain["domain_id"], domain.get("title")
            # Check aliases
            for alias in domain.get("aliases", []):
                if alias.lower() in ref_lower or ref_lower in alias.lower():
                    return fw_id, domain["domain_id"], domain.get("title")

    return None, None, None


_FRAMEWORK_KW_SET = {
    "praktiken", "kontrollen", "massnahmen", "maßnahmen",
    "anforderungen", "vorgaben", "framework", "standard",
    "baseline", "katalog", "domain", "family", "category",
    "practices", "controls", "measures", "requirements",
}


def _has_framework_keywords(text: str) -> bool:
    """Check if text contains framework-indicator keywords."""
    words = set(re.findall(r"[a-zäöüß]+", text.lower()))
    return len(words & _FRAMEWORK_KW_SET) >= 2


def _is_compound_obligation(action_raw: str, obligation_text: str) -> bool:
    """Detect if the obligation has multiple competing main verbs."""
    if not action_raw:
        return False

    action_lower = action_raw.lower().strip()

    # Check no-split phrases first
    for phrase in _NO_SPLIT_PHRASES:
        if phrase in action_lower:
            return False

    # Must have a conjunction
    if not _COMPOUND_VERB_RE.search(action_lower):
        return False

    # Split by conjunctions and check if we get 2+ meaningful verbs
    parts = re.split(r"\b(?:und|sowie|als\s+auch|or|and)\b", action_lower)
    meaningful = [p.strip() for p in parts if len(p.strip()) >= 3]
    return len(meaningful) >= 2


# ---------------------------------------------------------------------------
# Framework Decomposition
# ---------------------------------------------------------------------------
@dataclass
class DecomposedObligation:
    """A concrete obligation derived from a framework container."""
    obligation_candidate_id: str
    parent_control_id: str
    parent_framework_container_id: str
    source_ref_law: str
    source_ref_article: str
    obligation_text: str
    actor: str
    action_raw: str
    object_raw: str
    condition_raw: Optional[str] = None
    trigger_raw: Optional[str] = None
    routing_type: str = "atomic"
    release_state: str = "decomposed"
    subcontrol_id: str = ""
    # Metadata
    action_hint: str = ""
    object_hint: str = ""
    object_class: str = ""
    keywords: list[str] = field(default_factory=list)


@dataclass
class FrameworkDecompositionResult:
    """Result of framework decomposition."""
    framework_container_id: str
    source_obligation_candidate_id: str
    framework_ref: Optional[str]
    framework_domain: Optional[str]
    domain_title: Optional[str]
    matched_subcontrols: list[str]
    decomposition_confidence: float
    release_state: str  # decomposed | unmatched | error
    decomposed_obligations: list[DecomposedObligation]
    issues: list[str]


def decompose_framework_container(
    obligation_candidate_id: str,
    parent_control_id: str,
    obligation_text: str,
    framework_ref: Optional[str],
    framework_domain: Optional[str],
    actor: str = "organization",
) -> FrameworkDecompositionResult:
    """Decompose a framework-container obligation into concrete sub-obligations.

    Steps:
        1. Resolve framework from registry
        2. Resolve domain within framework
        3. Select relevant subcontrols (keyword filter or full domain)
        4. Generate decomposed obligations
    """
    container_id = f"FWC-{uuid.uuid4().hex[:8]}"
    registry = get_registry()
    issues: list[str] = []

    # Step 1: Resolve framework
    fw = None
    if framework_ref and framework_ref in registry:
        fw = registry[framework_ref]
    else:
        # Try to find by name in text
        fw, framework_ref = _find_framework_in_text(obligation_text, registry)

    if not fw:
        issues.append("ERROR: framework_not_matched")
        return FrameworkDecompositionResult(
            framework_container_id=container_id,
            source_obligation_candidate_id=obligation_candidate_id,
            framework_ref=framework_ref,
            framework_domain=framework_domain,
            domain_title=None,
            matched_subcontrols=[],
            decomposition_confidence=0.0,
            release_state="unmatched",
            decomposed_obligations=[],
            issues=issues,
        )

    # Step 2: Resolve domain
    domain_data = None
    domain_title = None
    if framework_domain:
        for d in fw.get("domains", []):
            if d["domain_id"].lower() == framework_domain.lower():
                domain_data = d
                domain_title = d.get("title")
                break
    if not domain_data:
        # Try matching from text
        domain_id, domain_title = _match_domain(obligation_text, fw)
        if domain_id:
            for d in fw.get("domains", []):
                if d["domain_id"] == domain_id:
                    domain_data = d
                    framework_domain = domain_id
                    break

    if not domain_data:
        issues.append("WARN: domain_not_matched — using all domains")
        # Fall back to all subcontrols across all domains
        all_subcontrols = []
        for d in fw.get("domains", []):
            for sc in d.get("subcontrols", []):
                sc["_domain_id"] = d["domain_id"]
                all_subcontrols.append(sc)
        subcontrols = _select_subcontrols(obligation_text, all_subcontrols)
        if not subcontrols:
            issues.append("ERROR: no_subcontrols_matched")
            return FrameworkDecompositionResult(
                framework_container_id=container_id,
                source_obligation_candidate_id=obligation_candidate_id,
                framework_ref=framework_ref,
                framework_domain=framework_domain,
                domain_title=None,
                matched_subcontrols=[],
                decomposition_confidence=0.0,
                release_state="unmatched",
                decomposed_obligations=[],
                issues=issues,
            )
    else:
        # Step 3: Select subcontrols from domain
        raw_subcontrols = domain_data.get("subcontrols", [])
        subcontrols = _select_subcontrols(obligation_text, raw_subcontrols)
        if not subcontrols:
            # Full domain decomposition
            subcontrols = raw_subcontrols

    # Quality check: too many subcontrols
    if len(subcontrols) > 25:
        issues.append(f"WARN: {len(subcontrols)} subcontrols — may be too broad")

    # Step 4: Generate decomposed obligations
    display_name = fw.get("display_name", framework_ref or "Unknown")
    decomposed: list[DecomposedObligation] = []
    matched_ids: list[str] = []

    for sc in subcontrols:
        sc_id = sc.get("subcontrol_id", "")
        matched_ids.append(sc_id)

        action_hint = sc.get("action_hint", "")
        object_hint = sc.get("object_hint", "")

        # Quality warnings
        if not action_hint:
            issues.append(f"WARN: {sc_id} missing action_hint")
        if not object_hint:
            issues.append(f"WARN: {sc_id} missing object_hint")

        obl_id = f"{obligation_candidate_id}-{sc_id}"

        decomposed.append(DecomposedObligation(
            obligation_candidate_id=obl_id,
            parent_control_id=parent_control_id,
            parent_framework_container_id=container_id,
            source_ref_law=display_name,
            source_ref_article=sc_id,
            obligation_text=sc.get("statement", ""),
            actor=actor,
            action_raw=action_hint or _infer_action(sc.get("statement", "")),
            object_raw=object_hint or _infer_object(sc.get("statement", "")),
            routing_type="atomic",
            release_state="decomposed",
            subcontrol_id=sc_id,
            action_hint=action_hint,
            object_hint=object_hint,
            object_class=sc.get("object_class", ""),
            keywords=sc.get("keywords", []),
        ))

    # Check if decomposed obligations are identical to the container
    for d in decomposed:
        if d.obligation_text.strip() == obligation_text.strip():
            issues.append(f"WARN: {d.subcontrol_id} identical to container text")

    confidence = _compute_decomposition_confidence(
        framework_ref, framework_domain, domain_data, len(subcontrols), issues,
    )

    return FrameworkDecompositionResult(
        framework_container_id=container_id,
        source_obligation_candidate_id=obligation_candidate_id,
        framework_ref=framework_ref,
        framework_domain=framework_domain,
        domain_title=domain_title,
        matched_subcontrols=matched_ids,
        decomposition_confidence=confidence,
        release_state="decomposed",
        decomposed_obligations=decomposed,
        issues=issues,
    )
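
A hedged usage sketch for the decomposition step; the IDs and obligation text are invented, and the output depends entirely on the loaded registry files:

# Illustrative call; "OBL-0001"/"CTRL-0001" are placeholder IDs:
res = decompose_framework_container(
    obligation_candidate_id="OBL-0001",
    parent_control_id="CTRL-0001",
    obligation_text="Die Organisation muss CCM-Praktiken fuer AIS umsetzen.",
    framework_ref="csa_ccm",
    framework_domain="AIS",
)
print(res.release_state, len(res.decomposed_obligations), res.decomposition_confidence)
for o in res.decomposed_obligations[:3]:
    print(o.subcontrol_id, o.action_raw, "->", o.object_raw[:40])
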
def _find_framework_in_text(
    text: str, registry: dict[str, dict],
) -> tuple[Optional[dict], Optional[str]]:
    """Try to find a framework by searching text for known names."""
    alias_idx = _build_alias_index(registry)
    m = _DIRECT_FRAMEWORK_RE.search(text)
    if m:
        fw_id = _resolve_framework_id(m.group(0), alias_idx, registry)
        if fw_id and fw_id in registry:
            return registry[fw_id], fw_id
    return None, None


def _select_subcontrols(
    obligation_text: str, subcontrols: list[dict],
) -> list[dict]:
    """Select relevant subcontrols based on keyword matching.

    Returns an empty list if no targeted match is found (the caller falls
    back to the full domain).
    """
    text_lower = obligation_text.lower()
    scored: list[tuple[int, dict]] = []

    for sc in subcontrols:
        score = 0
        for kw in sc.get("keywords", []):
            if kw.lower() in text_lower:
                score += 1
        # Title match
        title = sc.get("title", "").lower()
        if title and title in text_lower:
            score += 3
        # Object hint in text
        obj = sc.get("object_hint", "").lower()
        if obj and obj in text_lower:
            score += 2

        if score > 0:
            scored.append((score, sc))

    if not scored:
        return []

    # Only return those with meaningful overlap (score >= 2)
    scored.sort(key=lambda x: x[0], reverse=True)
    return [sc for score, sc in scored if score >= 2]


def _infer_action(statement: str) -> str:
    """Infer a basic action verb from a statement."""
    s = statement.lower()
    if any(w in s for w in ["definiert", "definieren", "define"]):
        return "definieren"
    if any(w in s for w in ["implementiert", "implementieren", "implement"]):
        return "implementieren"
    if any(w in s for w in ["dokumentiert", "dokumentieren", "document"]):
        return "dokumentieren"
    if any(w in s for w in ["ueberwacht", "ueberwachen", "monitor"]):
        return "ueberwachen"
    if any(w in s for w in ["getestet", "testen", "test"]):
        return "testen"
    if any(w in s for w in ["geschuetzt", "schuetzen", "protect"]):
        return "implementieren"
    if any(w in s for w in ["verwaltet", "verwalten", "manage"]):
        return "pflegen"
    if any(w in s for w in ["gemeldet", "melden", "report"]):
        return "melden"
    return "implementieren"


def _infer_object(statement: str) -> str:
    """Infer the primary object from a statement (first noun phrase)."""
    # Simple heuristic: take the text after "muessen"/"muss" up to the verb
    m = re.search(
        r"(?:muessen|muss|m(?:ü|ue)ssen)\s+(.+?)(?:\s+werden|\s+sein|\.|,|$)",
        statement,
        re.IGNORECASE,
    )
    if m:
        return m.group(1).strip()[:80]
    # Fallback: first 80 chars
    return statement[:80] if statement else ""


def _compute_decomposition_confidence(
    framework_ref: Optional[str],
    domain: Optional[str],
    domain_data: Optional[dict],
    num_subcontrols: int,
    issues: list[str],
) -> float:
    """Compute confidence score for the decomposition."""
    score = 0.3
    if framework_ref:
        score += 0.25
    if domain:
        score += 0.20
    if domain_data:
        score += 0.10
    if 1 <= num_subcontrols <= 15:
        score += 0.10
    elif num_subcontrols > 15:
        score += 0.05  # less confident with too many

    # Penalize errors
    errors = sum(1 for i in issues if i.startswith("ERROR:"))
    score -= errors * 0.15
    return round(max(min(score, 1.0), 0.0), 2)
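
The confidence score is additive, so a fully resolved decomposition works out as follows (one worked example using the weights above):

# framework_ref resolved (+0.25), domain named (+0.20), domain_data found (+0.10),
# 8 subcontrols (+0.10), no "ERROR:" issues:
#   0.3 + 0.25 + 0.20 + 0.10 + 0.10 = 0.95
# With one "ERROR:" issue the same case drops by 0.15 to 0.80.
assert _compute_decomposition_confidence("csa_ccm", "AIS", {"domain_id": "AIS"}, 8, []) == 0.95
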
# ---------------------------------------------------------------------------
# Registry statistics (for admin/debugging)
# ---------------------------------------------------------------------------

def registry_stats() -> dict:
    """Return summary statistics about the loaded registry."""
    reg = get_registry()
    stats = {
        "frameworks": len(reg),
        "details": [],
    }
    total_domains = 0
    total_subcontrols = 0
    for fw_id, fw in reg.items():
        domains = fw.get("domains", [])
        n_sc = sum(len(d.get("subcontrols", [])) for d in domains)
        total_domains += len(domains)
        total_subcontrols += n_sc
        stats["details"].append({
            "framework_id": fw_id,
            "display_name": fw.get("display_name", ""),
            "domains": len(domains),
            "subcontrols": n_sc,
        })
    stats["total_domains"] = total_domains
    stats["total_subcontrols"] = total_subcontrols
    return stats
116 control-pipeline/services/license_gate.py (Normal file)
@@ -0,0 +1,116 @@
"""
License Gate — checks whether a given source may be used for a specific purpose.

Usage types:
    - analysis: Read + analyse internally (TDM under UrhG 44b)
    - store_excerpt: Store verbatim excerpt in vault
    - ship_embeddings: Ship embeddings in product
    - ship_in_product: Ship text/content in product

Policy is driven by the canonical_control_sources table columns:
    allowed_analysis, allowed_store_excerpt, allowed_ship_embeddings, allowed_ship_in_product
"""

from __future__ import annotations

import logging
from typing import Any

from sqlalchemy import text
from sqlalchemy.orm import Session

logger = logging.getLogger(__name__)

USAGE_COLUMN_MAP = {
    "analysis": "allowed_analysis",
    "store_excerpt": "allowed_store_excerpt",
    "ship_embeddings": "allowed_ship_embeddings",
    "ship_in_product": "allowed_ship_in_product",
}


def check_source_allowed(db: Session, source_id: str, usage_type: str) -> bool:
    """Check whether *source_id* may be used for *usage_type*.

    Returns False if the source is unknown or the usage is not allowed.
    """
    col = USAGE_COLUMN_MAP.get(usage_type)
    if col is None:
        logger.warning("Unknown usage_type=%s", usage_type)
        return False

    row = db.execute(
        text(f"SELECT {col} FROM canonical_control_sources WHERE source_id = :sid"),
        {"sid": source_id},
    ).fetchone()

    if row is None:
        logger.warning("Source %s not found in registry", source_id)
        return False

    return bool(row[0])
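
Callers gate each pipeline stage on this check. A minimal sketch; SessionLocal and the source IDs are placeholders:

def can_ship(db, source_id: str) -> bool:
    # A source that may only be analysed must never reach the product build.
    return check_source_allowed(db, source_id, "ship_in_product")

# with SessionLocal() as db:  # hypothetical session factory
#     if not check_source_allowed(db, "bsi_grundschutz_2023", "store_excerpt"):
#         raise PermissionError("excerpt storage not licensed for this source")
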
def get_license_matrix(db: Session) -> list[dict[str, Any]]:
    """Return the full license matrix with allowed usages per license."""
    rows = db.execute(
        text("""
            SELECT license_id, name, terms_url, commercial_use,
                   ai_training_restriction, tdm_allowed_under_44b,
                   deletion_required, notes
            FROM canonical_control_licenses
            ORDER BY license_id
        """)
    ).fetchall()

    return [
        {
            "license_id": r.license_id,
            "name": r.name,
            "terms_url": r.terms_url,
            "commercial_use": r.commercial_use,
            "ai_training_restriction": r.ai_training_restriction,
            "tdm_allowed_under_44b": r.tdm_allowed_under_44b,
            "deletion_required": r.deletion_required,
            "notes": r.notes,
        }
        for r in rows
    ]


def get_source_permissions(db: Session) -> list[dict[str, Any]]:
    """Return all sources with their permission flags."""
    rows = db.execute(
        text("""
            SELECT s.source_id, s.title, s.publisher, s.url, s.version_label,
                   s.language, s.license_id,
                   s.allowed_analysis, s.allowed_store_excerpt,
                   s.allowed_ship_embeddings, s.allowed_ship_in_product,
                   s.vault_retention_days, s.vault_access_tier,
                   l.name AS license_name, l.commercial_use
            FROM canonical_control_sources s
            JOIN canonical_control_licenses l ON l.license_id = s.license_id
            ORDER BY s.source_id
        """)
    ).fetchall()

    return [
        {
            "source_id": r.source_id,
            "title": r.title,
            "publisher": r.publisher,
            "url": r.url,
            "version_label": r.version_label,
            "language": r.language,
            "license_id": r.license_id,
            "license_name": r.license_name,
            "commercial_use": r.commercial_use,
            "allowed_analysis": r.allowed_analysis,
            "allowed_store_excerpt": r.allowed_store_excerpt,
            "allowed_ship_embeddings": r.allowed_ship_embeddings,
            "allowed_ship_in_product": r.allowed_ship_in_product,
            "vault_retention_days": r.vault_retention_days,
            "vault_access_tier": r.vault_access_tier,
        }
        for r in rows
    ]
624 control-pipeline/services/llm_provider.py (Normal file)
@@ -0,0 +1,624 @@
"""
LLM Provider Abstraction for Compliance AI Features.

Supports:
- Anthropic Claude API (default)
- Self-hosted LLMs (Ollama, vLLM, LocalAI, etc.)
- HashiCorp Vault integration for secure API key storage

Configuration via environment variables:
- COMPLIANCE_LLM_PROVIDER: "anthropic" or "self_hosted"
- ANTHROPIC_API_KEY: API key for Claude (or loaded from Vault)
- ANTHROPIC_MODEL: Model name (default: claude-sonnet-4-20250514)
- SELF_HOSTED_LLM_URL: Base URL for the self-hosted LLM
- SELF_HOSTED_LLM_MODEL: Model name for the self-hosted LLM
- SELF_HOSTED_LLM_KEY: Optional API key for the self-hosted LLM

Vault configuration:
- VAULT_ADDR: Vault server address (e.g., http://vault:8200)
- VAULT_TOKEN: Vault authentication token
- USE_VAULT_SECRETS: Set to "true" to enable Vault integration
- VAULT_SECRET_PATH: Path to secrets (default: secret/breakpilot/api_keys)
"""

import os
import asyncio
import logging
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any
from dataclasses import dataclass
from enum import Enum

import httpx

logger = logging.getLogger(__name__)


# =============================================================================
# Vault Integration
# =============================================================================

class VaultClient:
    """
    HashiCorp Vault client for retrieving secrets.

    Supports the KV v2 secrets engine.
    """

    def __init__(
        self,
        addr: Optional[str] = None,
        token: Optional[str] = None
    ):
        self.addr = addr or os.getenv("VAULT_ADDR", "http://localhost:8200")
        self.token = token or os.getenv("VAULT_TOKEN")
        self._cache: Dict[str, Any] = {}
        self._cache_ttl = 300  # intended TTL in seconds (entries are currently cached indefinitely)

    def _get_headers(self) -> Dict[str, str]:
        """Get request headers with the Vault token."""
        headers = {"Content-Type": "application/json"}
        if self.token:
            headers["X-Vault-Token"] = self.token
        return headers

    def get_secret(self, path: str, key: str = "value") -> Optional[str]:
        """
        Get a secret from Vault KV v2.

        Args:
            path: Secret path (e.g., "breakpilot/api_keys/anthropic")
            key: Key within the secret data (default: "value")

        Returns:
            Secret value or None if not found
        """
        cache_key = f"{path}:{key}"

        # Check the cache first
        if cache_key in self._cache:
            return self._cache[cache_key]

        try:
            # KV v2 uses /data/ in the path
            full_path = f"{self.addr}/v1/secret/data/{path}"

            response = httpx.get(
                full_path,
                headers=self._get_headers(),
                timeout=10.0
            )

            if response.status_code == 200:
                data = response.json()
                secret_data = data.get("data", {}).get("data", {})
                secret_value = secret_data.get(key)

                if secret_value:
                    self._cache[cache_key] = secret_value
                    logger.info(f"Successfully loaded secret from Vault: {path}")
                    return secret_value

            elif response.status_code == 404:
                logger.warning(f"Secret not found in Vault: {path}")
            else:
                logger.error(f"Vault error {response.status_code}: {response.text}")

        except httpx.RequestError as e:
            logger.error(f"Failed to connect to Vault at {self.addr}: {e}")
        except Exception as e:
            logger.error(f"Error retrieving secret from Vault: {e}")

        return None

    def get_anthropic_key(self) -> Optional[str]:
        """Get the Anthropic API key from Vault."""
        path = os.getenv("VAULT_ANTHROPIC_PATH", "breakpilot/api_keys/anthropic")
        return self.get_secret(path, "value")

    def is_available(self) -> bool:
        """Check if Vault is available and authenticated."""
        try:
            response = httpx.get(
                f"{self.addr}/v1/sys/health",
                headers=self._get_headers(),
                timeout=5.0
            )
            return response.status_code in (200, 429, 472, 473, 501, 503)
        except Exception:
            return False


# Singleton Vault client
_vault_client: Optional[VaultClient] = None


def get_vault_client() -> VaultClient:
    """Get the shared Vault client instance."""
    global _vault_client
    if _vault_client is None:
        _vault_client = VaultClient()
    return _vault_client


def get_secret_from_vault_or_env(
    vault_path: str,
    env_var: str,
    vault_key: str = "value"
) -> Optional[str]:
    """
    Get a secret, trying Vault first, then falling back to an environment variable.

    Args:
        vault_path: Path in Vault (e.g., "breakpilot/api_keys/anthropic")
        env_var: Environment variable name as fallback
        vault_key: Key within the Vault secret data

    Returns:
        Secret value or None
    """
    use_vault = os.getenv("USE_VAULT_SECRETS", "").lower() in ("true", "1", "yes")

    if use_vault:
        vault = get_vault_client()
        secret = vault.get_secret(vault_path, vault_key)
        if secret:
            return secret
        logger.info(f"Vault secret not found, falling back to env: {env_var}")

    return os.getenv(env_var)
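
Resolving the Claude key is then a single call regardless of where the secret lives. A short sketch using the default path and env var named in the module docstring:

# Vault is consulted only when USE_VAULT_SECRETS is truthy; otherwise the env var wins.
api_key = get_secret_from_vault_or_env(
    vault_path="breakpilot/api_keys/anthropic",
    env_var="ANTHROPIC_API_KEY",
)
if api_key is None:
    raise RuntimeError("No Anthropic API key in Vault or environment")
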
class LLMProviderType(str, Enum):
    """Supported LLM provider types."""
    ANTHROPIC = "anthropic"
    SELF_HOSTED = "self_hosted"
    OLLAMA = "ollama"  # Alias for self_hosted (Ollama-specific)
    MOCK = "mock"  # For testing


@dataclass
class LLMResponse:
    """Standard response from LLM."""
    content: str
    model: str
    provider: str
    usage: Optional[Dict[str, int]] = None
    raw_response: Optional[Dict[str, Any]] = None


@dataclass
class LLMConfig:
    """Configuration for LLM provider."""
    provider_type: LLMProviderType
    api_key: Optional[str] = None
    model: str = "claude-sonnet-4-20250514"
    base_url: Optional[str] = None
    max_tokens: int = 4096
    temperature: float = 0.3
    timeout: float = 60.0


class LLMProvider(ABC):
    """Abstract base class for LLM providers."""

    def __init__(self, config: LLMConfig):
        self.config = config

    @abstractmethod
    async def complete(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None
    ) -> LLMResponse:
        """Generate a completion for the given prompt."""
        pass

    @abstractmethod
    async def batch_complete(
        self,
        prompts: List[str],
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        rate_limit: float = 1.0
    ) -> List[LLMResponse]:
        """Generate completions for multiple prompts with rate limiting."""
        pass

    @property
    @abstractmethod
    def provider_name(self) -> str:
        """Return the provider name."""
        pass


class AnthropicProvider(LLMProvider):
    """Claude API Provider using Anthropic's official API."""

    ANTHROPIC_API_URL = "https://api.anthropic.com/v1/messages"

    def __init__(self, config: LLMConfig):
        super().__init__(config)
        if not config.api_key:
            raise ValueError("Anthropic API key is required")
        self.api_key = config.api_key
        self.model = config.model or "claude-sonnet-4-20250514"

    @property
    def provider_name(self) -> str:
        return "anthropic"

    async def complete(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None
    ) -> LLMResponse:
        """Generate completion using Claude API."""

        headers = {
            "x-api-key": self.api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json"
        }

        messages = [{"role": "user", "content": prompt}]

        payload = {
            "model": self.model,
            "max_tokens": max_tokens or self.config.max_tokens,
            "messages": messages
        }

        if system_prompt:
            payload["system"] = system_prompt

        if temperature is not None:
            payload["temperature"] = temperature
        elif self.config.temperature is not None:
            payload["temperature"] = self.config.temperature

        async with httpx.AsyncClient(timeout=self.config.timeout) as client:
            try:
                response = await client.post(
                    self.ANTHROPIC_API_URL,
                    headers=headers,
                    json=payload
                )
                response.raise_for_status()
                data = response.json()

                content = ""
                if data.get("content"):
                    content = data["content"][0].get("text", "")

                return LLMResponse(
                    content=content,
                    model=self.model,
                    provider=self.provider_name,
                    usage=data.get("usage"),
                    raw_response=data
                )

            except httpx.HTTPStatusError as e:
                logger.error(f"Anthropic API error: {e.response.status_code} - {e.response.text}")
                raise
            except Exception as e:
                logger.error(f"Anthropic API request failed: {e}")
                raise

    async def batch_complete(
        self,
        prompts: List[str],
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        rate_limit: float = 1.0
    ) -> List[LLMResponse]:
        """Process multiple prompts with rate limiting."""
        results = []

        for i, prompt in enumerate(prompts):
            if i > 0:
                await asyncio.sleep(rate_limit)

            try:
                result = await self.complete(
                    prompt=prompt,
                    system_prompt=system_prompt,
                    max_tokens=max_tokens
                )
                results.append(result)
            except Exception as e:
                logger.error(f"Failed to process prompt {i}: {e}")
                # Append error response
                results.append(LLMResponse(
                    content=f"Error: {str(e)}",
                    model=self.model,
                    provider=self.provider_name
                ))

        return results


class SelfHostedProvider(LLMProvider):
    """Self-Hosted LLM Provider supporting Ollama, vLLM, LocalAI, etc."""

    def __init__(self, config: LLMConfig):
        super().__init__(config)
        if not config.base_url:
            raise ValueError("Base URL is required for self-hosted provider")
        self.base_url = config.base_url.rstrip("/")
        self.model = config.model
        self.api_key = config.api_key

    @property
    def provider_name(self) -> str:
        return "self_hosted"

    def _detect_api_format(self) -> str:
        """Detect the API format based on URL patterns."""
        if "11434" in self.base_url or "ollama" in self.base_url.lower():
            return "ollama"
        elif "openai" in self.base_url.lower() or "v1" in self.base_url:
            return "openai"
        else:
            return "ollama"  # Default to Ollama format

    async def complete(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None
    ) -> LLMResponse:
        """Generate completion using self-hosted LLM."""

        api_format = self._detect_api_format()

        headers = {"content-type": "application/json"}
        if self.api_key:
            headers["Authorization"] = f"Bearer {self.api_key}"

        if api_format == "ollama":
            # Ollama API format
            endpoint = f"{self.base_url}/api/generate"
            full_prompt = prompt
            if system_prompt:
                full_prompt = f"{system_prompt}\n\n{prompt}"

            payload = {
                "model": self.model,
                "prompt": full_prompt,
                "stream": False,
                "think": False,  # Disable thinking mode (qwen3.5 etc.)
                "options": {}
            }

            if max_tokens:
                payload["options"]["num_predict"] = max_tokens
            if temperature is not None:
                payload["options"]["temperature"] = temperature

        else:
            # OpenAI-compatible format (vLLM, LocalAI, etc.)
            endpoint = f"{self.base_url}/v1/chat/completions"

            messages = []
            if system_prompt:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": prompt})

            payload = {
                "model": self.model,
                "messages": messages,
                "max_tokens": max_tokens or self.config.max_tokens,
                "temperature": temperature if temperature is not None else self.config.temperature
            }

        async with httpx.AsyncClient(timeout=self.config.timeout) as client:
            try:
                response = await client.post(endpoint, headers=headers, json=payload)
                response.raise_for_status()
                data = response.json()

                # Parse response based on format
                if api_format == "ollama":
                    content = data.get("response", "")
                else:
                    # OpenAI format
                    content = data.get("choices", [{}])[0].get("message", {}).get("content", "")

                return LLMResponse(
                    content=content,
                    model=self.model,
                    provider=self.provider_name,
                    usage=data.get("usage"),
                    raw_response=data
                )

            except httpx.HTTPStatusError as e:
                logger.error(f"Self-hosted LLM error: {e.response.status_code} - {e.response.text}")
                raise
            except Exception as e:
                logger.error(f"Self-hosted LLM request failed: {e}")
                raise

    async def batch_complete(
        self,
        prompts: List[str],
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        rate_limit: float = 0.5  # Self-hosted can be faster
    ) -> List[LLMResponse]:
        """Process multiple prompts with rate limiting."""
        results = []

        for i, prompt in enumerate(prompts):
            if i > 0:
                await asyncio.sleep(rate_limit)

            try:
                result = await self.complete(
                    prompt=prompt,
                    system_prompt=system_prompt,
                    max_tokens=max_tokens
                )
                results.append(result)
            except Exception as e:
                logger.error(f"Failed to process prompt {i}: {e}")
                results.append(LLMResponse(
                    content=f"Error: {str(e)}",
                    model=self.model,
                    provider=self.provider_name
                ))

        return results


class MockProvider(LLMProvider):
    """Mock provider for testing without actual API calls."""

    def __init__(self, config: LLMConfig):
        super().__init__(config)
        self.responses: List[str] = []
        self.call_count = 0

    @property
    def provider_name(self) -> str:
        return "mock"

    def set_responses(self, responses: List[str]):
        """Set predetermined responses for testing."""
        self.responses = responses
        self.call_count = 0

    async def complete(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None
    ) -> LLMResponse:
        """Return mock response."""
        if self.responses:
            content = self.responses[self.call_count % len(self.responses)]
        else:
            content = f"Mock response for: {prompt[:50]}..."

        self.call_count += 1

        return LLMResponse(
            content=content,
            model="mock-model",
            provider=self.provider_name,
            usage={"input_tokens": len(prompt), "output_tokens": len(content)}
        )

    async def batch_complete(
        self,
        prompts: List[str],
        system_prompt: Optional[str] = None,
        max_tokens: Optional[int] = None,
        rate_limit: float = 0.0
    ) -> List[LLMResponse]:
        """Return mock responses for batch."""
        return [await self.complete(p, system_prompt, max_tokens) for p in prompts]


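# Deterministic-testing sketch for MockProvider; the assertion is an
# illustrative pytest-style check, not a fixture from this repo:
#
#     provider = MockProvider(LLMConfig(provider_type=LLMProviderType.MOCK))
#     provider.set_responses(['{"status": "ok"}'])
#     response = await provider.complete("any prompt")
#     assert response.content == '{"status": "ok"}'
#     assert provider.call_count == 1

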
def get_llm_config() -> LLMConfig:
    """
    Create LLM config from environment variables or Vault.

    Priority for API key:
    1. Vault (if USE_VAULT_SECRETS=true and Vault is available)
    2. Environment variable (ANTHROPIC_API_KEY)
    """
    provider_type_str = os.getenv("COMPLIANCE_LLM_PROVIDER", "anthropic")

    try:
        provider_type = LLMProviderType(provider_type_str)
    except ValueError:
        logger.warning(f"Unknown LLM provider: {provider_type_str}, falling back to mock")
        provider_type = LLMProviderType.MOCK

    # Get API key from Vault or environment
    api_key = None
    if provider_type == LLMProviderType.ANTHROPIC:
        api_key = get_secret_from_vault_or_env(
            vault_path="breakpilot/api_keys/anthropic",
            env_var="ANTHROPIC_API_KEY"
        )
    elif provider_type in (LLMProviderType.SELF_HOSTED, LLMProviderType.OLLAMA):
        api_key = get_secret_from_vault_or_env(
            vault_path="breakpilot/api_keys/self_hosted_llm",
            env_var="SELF_HOSTED_LLM_KEY"
        )

    # Select model based on provider type
    if provider_type == LLMProviderType.ANTHROPIC:
        model = os.getenv("ANTHROPIC_MODEL", "claude-sonnet-4-20250514")
    elif provider_type in (LLMProviderType.SELF_HOSTED, LLMProviderType.OLLAMA):
        model = os.getenv("SELF_HOSTED_LLM_MODEL", "qwen2.5:14b")
    else:
        model = "mock-model"

    return LLMConfig(
        provider_type=provider_type,
        api_key=api_key,
        model=model,
        base_url=os.getenv("SELF_HOSTED_LLM_URL"),
        max_tokens=int(os.getenv("COMPLIANCE_LLM_MAX_TOKENS", "4096")),
        temperature=float(os.getenv("COMPLIANCE_LLM_TEMPERATURE", "0.3")),
        timeout=float(os.getenv("COMPLIANCE_LLM_TIMEOUT", "60.0"))
    )


def get_llm_provider(config: Optional[LLMConfig] = None) -> LLMProvider:
    """
    Factory function to get the appropriate LLM provider based on configuration.

    Usage:
        provider = get_llm_provider()
        response = await provider.complete("Analyze this requirement...")
    """
    if config is None:
        config = get_llm_config()

    if config.provider_type == LLMProviderType.ANTHROPIC:
        if not config.api_key:
            logger.warning("No Anthropic API key found, using mock provider")
            return MockProvider(config)
        return AnthropicProvider(config)

    elif config.provider_type in (LLMProviderType.SELF_HOSTED, LLMProviderType.OLLAMA):
        if not config.base_url:
            logger.warning("No self-hosted LLM URL found, using mock provider")
            return MockProvider(config)
        return SelfHostedProvider(config)

    elif config.provider_type == LLMProviderType.MOCK:
        return MockProvider(config)

    else:
        raise ValueError(f"Unsupported LLM provider type: {config.provider_type}")


# Singleton instance for reuse
_provider_instance: Optional[LLMProvider] = None


def get_shared_provider() -> LLMProvider:
    """Get a shared LLM provider instance."""
    global _provider_instance
    if _provider_instance is None:
        _provider_instance = get_llm_provider()
    return _provider_instance


def reset_shared_provider():
    """Reset the shared provider instance (useful for testing)."""
    global _provider_instance
    _provider_instance = None


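# Illustrative smoke test for the factory; without any API keys or a
# SELF_HOSTED_LLM_URL it falls back to MockProvider, so this is safe to run:
if __name__ == "__main__":
    import asyncio

    async def _demo():
        provider = get_llm_provider()
        response = await provider.complete("Summarize Art. 30 DSGVO in one sentence.")
        print(provider.provider_name, "->", response.content[:120])

    asyncio.run(_demo())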
59
control-pipeline/services/normative_patterns.py
Normal file
@@ -0,0 +1,59 @@
"""Shared normative language patterns for assertion classification.

Extracted from decomposition_pass.py for reuse in the assertion engine.
"""

import re

_PFLICHT_SIGNALS = [
    r"\bmüssen\b", r"\bmuss\b", r"\bhat\s+sicherzustellen\b",
    r"\bhaben\s+sicherzustellen\b", r"\bsind\s+verpflichtet\b",
    r"\bist\s+verpflichtet\b",
    r"\bist\s+zu\s+\w+en\b", r"\bsind\s+zu\s+\w+en\b",
    r"\bhat\s+zu\s+\w+en\b", r"\bhaben\s+zu\s+\w+en\b",
    r"\bist\s+\w+zu\w+en\b", r"\bsind\s+\w+zu\w+en\b",
    r"\bist\s+\w+\s+zu\s+\w+en\b", r"\bsind\s+\w+\s+zu\s+\w+en\b",
    r"\bhat\s+\w+\s+zu\s+\w+en\b", r"\bhaben\s+\w+\s+zu\s+\w+en\b",
    r"\bshall\b", r"\bmust\b", r"\brequired\b",
    r"\b\w+zuteilen\b", r"\b\w+zuwenden\b", r"\b\w+zustellen\b", r"\b\w+zulegen\b",
    r"\b\w+zunehmen\b", r"\b\w+zuführen\b", r"\b\w+zuhalten\b", r"\b\w+zusetzen\b",
    r"\b\w+zuweisen\b", r"\b\w+zuordnen\b", r"\b\w+zufügen\b", r"\b\w+zugeben\b",
    r"\bist\b.{1,80}\bzu\s+\w+en\b", r"\bsind\b.{1,80}\bzu\s+\w+en\b",
]
PFLICHT_RE = re.compile("|".join(_PFLICHT_SIGNALS), re.IGNORECASE)

_EMPFEHLUNG_SIGNALS = [
    r"\bsoll\b", r"\bsollen\b", r"\bsollte\b", r"\bsollten\b",
    r"\bgewährleisten\b", r"\bsicherstellen\b",
    r"\bshould\b", r"\bensure\b", r"\brecommend\w*\b",
    r"\bnachweisen\b", r"\beinhalten\b", r"\bunterlassen\b", r"\bwahren\b",
    r"\bdokumentieren\b", r"\bimplementieren\b", r"\büberprüfen\b", r"\büberwachen\b",
    r"\bprüfen,\s+ob\b", r"\bkontrollieren,\s+ob\b",
]
EMPFEHLUNG_RE = re.compile("|".join(_EMPFEHLUNG_SIGNALS), re.IGNORECASE)

_KANN_SIGNALS = [
    r"\bkann\b", r"\bkönnen\b", r"\bdarf\b", r"\bdürfen\b",
    r"\bmay\b", r"\boptional\b",
]
KANN_RE = re.compile("|".join(_KANN_SIGNALS), re.IGNORECASE)

NORMATIVE_RE = re.compile(
    "|".join(_PFLICHT_SIGNALS + _EMPFEHLUNG_SIGNALS + _KANN_SIGNALS),
    re.IGNORECASE,
)

_RATIONALE_SIGNALS = [
    r"\bda\s+", r"\bweil\b", r"\bgrund\b", r"\berwägung",
    r"\bbecause\b", r"\breason\b", r"\brationale\b",
    r"\bkönnen\s+.*\s+verursachen\b", r"\bführt\s+zu\b",
]
RATIONALE_RE = re.compile("|".join(_RATIONALE_SIGNALS), re.IGNORECASE)

# Evidence-related keywords (for fact detection)
_EVIDENCE_KEYWORDS = [
    r"\bnachweis\b", r"\bzertifikat\b", r"\baudit.report\b",
    r"\bprotokoll\b", r"\bdokumentation\b", r"\bbericht\b",
    r"\bcertificate\b", r"\bevidence\b", r"\bproof\b",
]
EVIDENCE_RE = re.compile("|".join(_EVIDENCE_KEYWORDS), re.IGNORECASE)


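# Minimal classifier sketch over the compiled patterns above. The precedence
# order (Pflicht > Empfehlung > Kann) is an assumption here, mirroring how
# decomposition_pass.py appears to rank these signals:
def classify_normative_strength(sentence: str) -> str:
    """Return "muss", "soll", "kann", or "none" for a German/English clause."""
    if PFLICHT_RE.search(sentence):
        return "muss"
    if EMPFEHLUNG_RE.search(sentence):
        return "soll"
    if KANN_RE.search(sentence):
        return "kann"
    return "none"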
563
control-pipeline/services/obligation_extractor.py
Normal file
@@ -0,0 +1,563 @@
"""Obligation Extractor — 3-Tier Chunk-to-Obligation Linking.

Maps RAG chunks to obligations from the v2 obligation framework using
three tiers (fastest first):

Tier 1: EXACT MATCH — regulation_code + article → obligation_id (~40%)
Tier 2: EMBEDDING — chunk text vs. obligation descriptions (~30%)
Tier 3: LLM EXTRACT — local Ollama extracts obligation text (~25%)

Part of the Multi-Layer Control Architecture (Phase 4 of 8).
"""

import json
import logging
import os
import re
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

import httpx

logger = logging.getLogger(__name__)

EMBEDDING_URL = os.getenv("EMBEDDING_URL", "http://embedding-service:8087")
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://host.docker.internal:11434")
OLLAMA_MODEL = os.getenv("CONTROL_GEN_OLLAMA_MODEL", "qwen3.5:35b-a3b")
LLM_TIMEOUT = float(os.getenv("CONTROL_GEN_LLM_TIMEOUT", "180"))

# Embedding similarity thresholds for Tier 2
EMBEDDING_MATCH_THRESHOLD = 0.80
EMBEDDING_CANDIDATE_THRESHOLD = 0.60

# ---------------------------------------------------------------------------
# Regulation code mapping: RAG chunk codes → obligation file regulation IDs
# ---------------------------------------------------------------------------

_REGULATION_CODE_TO_ID = {
    # DSGVO
    "eu_2016_679": "dsgvo",
    "dsgvo": "dsgvo",
    "gdpr": "dsgvo",
    # AI Act
    "eu_2024_1689": "ai_act",
    "ai_act": "ai_act",
    "aiact": "ai_act",
    # NIS2
    "eu_2022_2555": "nis2",
    "nis2": "nis2",
    "bsig": "nis2",
    # BDSG
    "bdsg": "bdsg",
    # TTDSG
    "ttdsg": "ttdsg",
    # DSA
    "eu_2022_2065": "dsa",
    "dsa": "dsa",
    # Data Act
    "eu_2023_2854": "data_act",
    "data_act": "data_act",
    # EU Machinery
    "eu_2023_1230": "eu_machinery",
    "eu_machinery": "eu_machinery",
    # DORA
    "eu_2022_2554": "dora",
    "dora": "dora",
}


@dataclass
class ObligationMatch:
    """Result of obligation extraction."""

    obligation_id: Optional[str] = None
    obligation_title: Optional[str] = None
    obligation_text: Optional[str] = None
    method: str = "none"  # exact_match | embedding_match | llm_extracted | inferred
    confidence: float = 0.0
    regulation_id: Optional[str] = None  # e.g. "dsgvo"

    def to_dict(self) -> dict:
        return {
            "obligation_id": self.obligation_id,
            "obligation_title": self.obligation_title,
            "obligation_text": self.obligation_text,
            "method": self.method,
            "confidence": self.confidence,
            "regulation_id": self.regulation_id,
        }


@dataclass
class _ObligationEntry:
    """Internal representation of a loaded obligation."""

    id: str
    title: str
    description: str
    regulation_id: str
    articles: list[str] = field(default_factory=list)  # normalized: ["art. 30", "§ 38"]
    embedding: list[float] = field(default_factory=list)


class ObligationExtractor:
    """3-Tier obligation extraction from RAG chunks.

    Usage::

        extractor = ObligationExtractor()
        await extractor.initialize()  # loads obligations + embeddings

        match = await extractor.extract(
            chunk_text="...",
            regulation_code="eu_2016_679",
            article="Art. 30",
            paragraph="Abs. 1",
        )
    """

    def __init__(self):
        self._article_lookup: dict[str, list[str]] = {}  # "dsgvo/art. 30" → ["DSGVO-OBL-001"]
        self._obligations: dict[str, _ObligationEntry] = {}  # id → entry
        self._obligation_embeddings: list[list[float]] = []
        self._obligation_ids: list[str] = []
        self._initialized = False

    async def initialize(self) -> None:
        """Load all obligations from v2 JSON files and compute embeddings."""
        if self._initialized:
            return

        self._load_obligations()
        await self._compute_embeddings()
        self._initialized = True
        logger.info(
            "ObligationExtractor initialized: %d obligations, %d article lookups, %d embeddings",
            len(self._obligations),
            len(self._article_lookup),
            sum(1 for e in self._obligation_embeddings if e),
        )

    async def extract(
        self,
        chunk_text: str,
        regulation_code: str,
        article: Optional[str] = None,
        paragraph: Optional[str] = None,
    ) -> ObligationMatch:
        """Extract obligation from a chunk using 3-tier strategy."""
        if not self._initialized:
            await self.initialize()

        reg_id = _normalize_regulation(regulation_code)

        # Tier 1: Exact match via article lookup
        if article:
            match = self._tier1_exact(reg_id, article)
            if match:
                return match

        # Tier 2: Embedding similarity
        match = await self._tier2_embedding(chunk_text, reg_id)
        if match:
            return match

        # Tier 3: LLM extraction
        match = await self._tier3_llm(chunk_text, regulation_code, article)
        return match

    # -----------------------------------------------------------------------
    # Tier 1: Exact Match
    # -----------------------------------------------------------------------

    def _tier1_exact(self, reg_id: Optional[str], article: str) -> Optional[ObligationMatch]:
        """Look up obligation by regulation + article."""
        if not reg_id:
            return None

        norm_article = _normalize_article(article)
        key = f"{reg_id}/{norm_article}"

        obl_ids = self._article_lookup.get(key)
        if not obl_ids:
            return None

        # Take the first match (highest priority)
        obl_id = obl_ids[0]
        entry = self._obligations.get(obl_id)
        if not entry:
            return None

        return ObligationMatch(
            obligation_id=entry.id,
            obligation_title=entry.title,
            obligation_text=entry.description,
            method="exact_match",
            confidence=1.0,
            regulation_id=reg_id,
        )

    # -----------------------------------------------------------------------
    # Tier 2: Embedding Match
    # -----------------------------------------------------------------------

    async def _tier2_embedding(
        self, chunk_text: str, reg_id: Optional[str]
    ) -> Optional[ObligationMatch]:
        """Find nearest obligation by embedding similarity."""
        if not self._obligation_embeddings:
            return None

        chunk_embedding = await _get_embedding(chunk_text[:2000])
        if not chunk_embedding:
            return None

        best_idx = -1
        best_score = 0.0

        for i, obl_emb in enumerate(self._obligation_embeddings):
            if not obl_emb:
                continue
            # Prefer same-regulation matches
            obl_id = self._obligation_ids[i]
            entry = self._obligations.get(obl_id)
            score = _cosine_sim(chunk_embedding, obl_emb)

            # Domain bonus: +0.05 if same regulation
            if entry and reg_id and entry.regulation_id == reg_id:
                score += 0.05

            if score > best_score:
                best_score = score
                best_idx = i

        if best_idx < 0:
            return None

        # Remove domain bonus for threshold comparison
        raw_score = best_score
        obl_id = self._obligation_ids[best_idx]
        entry = self._obligations.get(obl_id)
        if entry and reg_id and entry.regulation_id == reg_id:
            raw_score -= 0.05

        if raw_score >= EMBEDDING_MATCH_THRESHOLD:
            return ObligationMatch(
                obligation_id=entry.id if entry else obl_id,
                obligation_title=entry.title if entry else None,
                obligation_text=entry.description if entry else None,
                method="embedding_match",
                confidence=round(min(raw_score, 1.0), 3),
                regulation_id=entry.regulation_id if entry else reg_id,
            )

        return None

    # -----------------------------------------------------------------------
    # Tier 3: LLM Extraction
    # -----------------------------------------------------------------------

    async def _tier3_llm(
        self, chunk_text: str, regulation_code: str, article: Optional[str]
    ) -> ObligationMatch:
        """Use local LLM to extract the obligation from the chunk."""
        prompt = f"""Analysiere den folgenden Gesetzestext und extrahiere die zentrale rechtliche Pflicht.

Text:
{chunk_text[:3000]}

Quelle: {regulation_code} {article or ''}

Antworte NUR als JSON:
{{
"obligation_text": "Die zentrale Pflicht in einem Satz",
"actor": "Wer muss handeln (z.B. Verantwortlicher, Auftragsverarbeiter)",
"action": "Was muss getan werden",
"normative_strength": "muss|soll|kann"
}}"""

        system_prompt = (
            "Du bist ein Rechtsexperte fuer EU-Datenschutz- und Digitalrecht. "
            "Extrahiere die zentrale rechtliche Pflicht aus Gesetzestexten. "
            "Antworte ausschliesslich als JSON."
        )

        result_text = await _llm_ollama(prompt, system_prompt)
        if not result_text:
            return ObligationMatch(
                method="llm_extracted",
                confidence=0.0,
                regulation_id=_normalize_regulation(regulation_code),
            )

        parsed = _parse_json(result_text)
        obligation_text = parsed.get("obligation_text", result_text[:500])

        return ObligationMatch(
            obligation_id=None,
            obligation_title=None,
            obligation_text=obligation_text,
            method="llm_extracted",
            confidence=0.60,
            regulation_id=_normalize_regulation(regulation_code),
        )

    # -----------------------------------------------------------------------
    # Initialization helpers
    # -----------------------------------------------------------------------

    def _load_obligations(self) -> None:
        """Load all obligation files from v2 framework."""
        v2_dir = _find_obligations_dir()
        if not v2_dir:
            logger.warning("Obligations v2 directory not found — Tier 1 disabled")
            return

        manifest_path = v2_dir / "_manifest.json"
        if not manifest_path.exists():
            logger.warning("Manifest not found at %s", manifest_path)
            return

        with open(manifest_path) as f:
            manifest = json.load(f)

        for reg_info in manifest.get("regulations", []):
            reg_id = reg_info["id"]
            reg_file = v2_dir / reg_info["file"]
            if not reg_file.exists():
                logger.warning("Regulation file not found: %s", reg_file)
                continue

            with open(reg_file) as f:
                data = json.load(f)

            for obl in data.get("obligations", []):
                obl_id = obl["id"]
                entry = _ObligationEntry(
                    id=obl_id,
                    title=obl.get("title", ""),
                    description=obl.get("description", ""),
                    regulation_id=reg_id,
                )

                # Build article lookup from legal_basis
                for basis in obl.get("legal_basis", []):
                    article_raw = basis.get("article", "")
                    if article_raw:
                        norm_art = _normalize_article(article_raw)
                        key = f"{reg_id}/{norm_art}"
                        if key not in self._article_lookup:
                            self._article_lookup[key] = []
                        self._article_lookup[key].append(obl_id)
                        entry.articles.append(norm_art)

                self._obligations[obl_id] = entry

        logger.info(
            "Loaded %d obligations from %d regulations",
            len(self._obligations),
            len(manifest.get("regulations", [])),
        )

    async def _compute_embeddings(self) -> None:
        """Compute embeddings for all obligation descriptions."""
        if not self._obligations:
            return

        self._obligation_ids = list(self._obligations.keys())
        texts = [
            f"{self._obligations[oid].title}: {self._obligations[oid].description}"
            for oid in self._obligation_ids
        ]

        logger.info("Computing embeddings for %d obligations...", len(texts))
        self._obligation_embeddings = await _get_embeddings_batch(texts)
        valid = sum(1 for e in self._obligation_embeddings if e)
        logger.info("Got %d/%d valid embeddings", valid, len(texts))

    # -----------------------------------------------------------------------
    # Stats
    # -----------------------------------------------------------------------

    def stats(self) -> dict:
        """Return initialization statistics."""
        return {
            "total_obligations": len(self._obligations),
            "article_lookups": len(self._article_lookup),
            "embeddings_valid": sum(1 for e in self._obligation_embeddings if e),
            "regulations": list(
                {e.regulation_id for e in self._obligations.values()}
            ),
            "initialized": self._initialized,
        }


# ---------------------------------------------------------------------------
# Module-level helpers (reusable by other modules)
# ---------------------------------------------------------------------------


def _normalize_regulation(regulation_code: str) -> Optional[str]:
    """Map a RAG regulation_code to obligation framework regulation ID."""
    if not regulation_code:
        return None
    code = regulation_code.lower().strip()

    # Direct lookup
    if code in _REGULATION_CODE_TO_ID:
        return _REGULATION_CODE_TO_ID[code]

    # Prefix matching for families
    for prefix, reg_id in [
        ("eu_2016_679", "dsgvo"),
        ("eu_2024_1689", "ai_act"),
        ("eu_2022_2555", "nis2"),
        ("eu_2022_2065", "dsa"),
        ("eu_2023_2854", "data_act"),
        ("eu_2023_1230", "eu_machinery"),
        ("eu_2022_2554", "dora"),
    ]:
        if code.startswith(prefix):
            return reg_id

    return None


def _normalize_article(article: str) -> str:
    """Normalize article references for consistent lookup.

    Examples:
        "Art. 30" → "art. 30"
        "§ 38 BDSG" → "§ 38"
        "Article 10" → "art. 10"
        "Art. 30 Abs. 1" → "art. 30"
        "Artikel 35" → "art. 35"
    """
    if not article:
        return ""
    s = article.strip()

    # Remove trailing law name: "§ 38 BDSG" → "§ 38"
    s = re.sub(r"\s+(DSGVO|BDSG|TTDSG|DSA|NIS2|DORA|AI.?Act)\s*$", "", s, flags=re.IGNORECASE)

    # Remove paragraph references: "Art. 30 Abs. 1" → "Art. 30"
    s = re.sub(r"\s+(Abs|Absatz|para|paragraph|lit|Satz)\.?\s+.*$", "", s, flags=re.IGNORECASE)

    # Normalize "Article" / "Artikel" → "Art."
    s = re.sub(r"^(Article|Artikel)\s+", "Art. ", s, flags=re.IGNORECASE)

    return s.lower().strip()


def _cosine_sim(a: list[float], b: list[float]) -> float:
    """Compute cosine similarity between two vectors."""
    if not a or not b or len(a) != len(b):
        return 0.0
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(x * x for x in b) ** 0.5
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot / (norm_a * norm_b)


def _find_obligations_dir() -> Optional[Path]:
    """Locate the obligations v2 directory."""
    candidates = [
        Path(__file__).resolve().parent.parent.parent.parent
        / "ai-compliance-sdk" / "policies" / "obligations" / "v2",
        Path("/app/ai-compliance-sdk/policies/obligations/v2"),
        Path("ai-compliance-sdk/policies/obligations/v2"),
    ]
    for p in candidates:
        if p.is_dir() and (p / "_manifest.json").exists():
            return p
    return None


async def _get_embedding(text: str) -> list[float]:
    """Get embedding vector for a single text."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(
                f"{EMBEDDING_URL}/embed",
                json={"texts": [text]},
            )
            resp.raise_for_status()
            embeddings = resp.json().get("embeddings", [])
            return embeddings[0] if embeddings else []
    except Exception:
        return []


async def _get_embeddings_batch(
    texts: list[str], batch_size: int = 32
) -> list[list[float]]:
    """Get embeddings for multiple texts in batches."""
    all_embeddings: list[list[float]] = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i : i + batch_size]
        try:
            async with httpx.AsyncClient(timeout=30.0) as client:
                resp = await client.post(
                    f"{EMBEDDING_URL}/embed",
                    json={"texts": batch},
                )
                resp.raise_for_status()
                embeddings = resp.json().get("embeddings", [])
                all_embeddings.extend(embeddings)
        except Exception as e:
            logger.warning("Batch embedding failed for %d texts: %s", len(batch), e)
            all_embeddings.extend([[] for _ in batch])
    return all_embeddings


async def _llm_ollama(prompt: str, system_prompt: Optional[str] = None) -> str:
    """Call local Ollama for LLM extraction."""
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})

    payload = {
        "model": OLLAMA_MODEL,
        "messages": messages,
        "stream": False,
        "format": "json",
        "options": {"num_predict": 512},
        "think": False,
    }

    try:
        async with httpx.AsyncClient(timeout=LLM_TIMEOUT) as client:
            resp = await client.post(f"{OLLAMA_URL}/api/chat", json=payload)
            if resp.status_code != 200:
                logger.error(
                    "Ollama chat failed %d: %s", resp.status_code, resp.text[:300]
                )
                return ""
            data = resp.json()
            return data.get("message", {}).get("content", "")
    except Exception as e:
        logger.warning("Ollama call failed: %s", e)
        return ""


def _parse_json(text: str) -> dict:
    """Extract JSON from LLM response text."""
    # Try direct parse
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        pass

    # Try extracting JSON block
    match = re.search(r"\{[^{}]*\}", text, re.DOTALL)
    if match:
        try:
            return json.loads(match.group())
        except json.JSONDecodeError:
            pass

    return {}


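# Illustrative smoke test; assumes the embedding service and Ollama endpoints
# configured above are reachable (otherwise Tiers 2 and 3 fail soft and return
# empty or low-confidence results):
if __name__ == "__main__":
    import asyncio

    async def _demo():
        extractor = ObligationExtractor()
        await extractor.initialize()
        match = await extractor.extract(
            chunk_text="Jeder Verantwortliche führt ein Verzeichnis aller Verarbeitungstätigkeiten...",
            regulation_code="eu_2016_679",
            article="Art. 30",
        )
        print(match.to_dict())
        print(extractor.stats())

    asyncio.run(_demo())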
532
control-pipeline/services/pattern_matcher.py
Normal file
@@ -0,0 +1,532 @@
"""Pattern Matcher — Obligation-to-Control-Pattern Linking.

Maps obligations (from the ObligationExtractor) to control patterns
using two tiers:

Tier 1: KEYWORD MATCH — obligation_match_keywords from patterns (~70%)
Tier 2: EMBEDDING — cosine similarity with domain bonus (~25%)

Part of the Multi-Layer Control Architecture (Phase 5 of 8).
"""

import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

import yaml

from services.obligation_extractor import (
    _cosine_sim,
    _get_embedding,
    _get_embeddings_batch,
)

logger = logging.getLogger(__name__)

# Minimum keyword score to accept a match (at least 2 keyword hits)
KEYWORD_MATCH_MIN_HITS = 2
# Embedding threshold for Tier 2
EMBEDDING_PATTERN_THRESHOLD = 0.75
# Domain bonus when regulation maps to the pattern's domain
DOMAIN_BONUS = 0.10

# Map regulation IDs to pattern domains that are likely relevant
_REGULATION_DOMAIN_AFFINITY = {
    "dsgvo": ["DATA", "COMP", "GOV"],
    "bdsg": ["DATA", "COMP"],
    "ttdsg": ["DATA"],
    "ai_act": ["AI", "COMP", "DATA"],
    "nis2": ["SEC", "INC", "NET", "LOG", "CRYP"],
    "dsa": ["DATA", "COMP"],
    "data_act": ["DATA", "COMP"],
    "eu_machinery": ["SEC", "COMP"],
    "dora": ["SEC", "INC", "FIN", "COMP"],
}


@dataclass
class ControlPattern:
    """Python representation of a control pattern from YAML."""

    id: str
    name: str
    name_de: str
    domain: str
    category: str
    description: str
    objective_template: str
    rationale_template: str
    requirements_template: list[str] = field(default_factory=list)
    test_procedure_template: list[str] = field(default_factory=list)
    evidence_template: list[str] = field(default_factory=list)
    severity_default: str = "medium"
    implementation_effort_default: str = "m"
    obligation_match_keywords: list[str] = field(default_factory=list)
    tags: list[str] = field(default_factory=list)
    composable_with: list[str] = field(default_factory=list)
    open_anchor_refs: list[dict] = field(default_factory=list)


@dataclass
class PatternMatchResult:
    """Result of pattern matching."""

    pattern: Optional[ControlPattern] = None
    pattern_id: Optional[str] = None
    method: str = "none"  # keyword | embedding | combined | none
    confidence: float = 0.0
    keyword_hits: int = 0
    total_keywords: int = 0
    embedding_score: float = 0.0
    domain_bonus_applied: bool = False
    composable_patterns: list[str] = field(default_factory=list)

    def to_dict(self) -> dict:
        return {
            "pattern_id": self.pattern_id,
            "method": self.method,
            "confidence": round(self.confidence, 3),
            "keyword_hits": self.keyword_hits,
            "total_keywords": self.total_keywords,
            "embedding_score": round(self.embedding_score, 3),
            "domain_bonus_applied": self.domain_bonus_applied,
            "composable_patterns": self.composable_patterns,
        }


class PatternMatcher:
    """Links obligations to control patterns using keyword + embedding matching.

    Usage::

        matcher = PatternMatcher()
        await matcher.initialize()

        result = await matcher.match(
            obligation_text="Fuehrung eines Verarbeitungsverzeichnisses...",
            regulation_id="dsgvo",
        )
        print(result.pattern_id)   # e.g. "CP-COMP-001"
        print(result.confidence)   # e.g. 0.85
    """

    def __init__(self):
        self._patterns: list[ControlPattern] = []
        self._by_id: dict[str, ControlPattern] = {}
        self._by_domain: dict[str, list[ControlPattern]] = {}
        self._keyword_index: dict[str, list[str]] = {}  # keyword → [pattern_ids]
        self._pattern_embeddings: list[list[float]] = []
        self._pattern_ids: list[str] = []
        self._initialized = False

    async def initialize(self) -> None:
        """Load patterns from YAML and compute embeddings."""
        if self._initialized:
            return

        self._load_patterns()
        self._build_keyword_index()
        await self._compute_embeddings()
        self._initialized = True
        logger.info(
            "PatternMatcher initialized: %d patterns, %d keywords, %d embeddings",
            len(self._patterns),
            len(self._keyword_index),
            sum(1 for e in self._pattern_embeddings if e),
        )

    async def match(
        self,
        obligation_text: str,
        regulation_id: Optional[str] = None,
        top_n: int = 1,
    ) -> PatternMatchResult:
        """Match obligation text to the best control pattern.

        Args:
            obligation_text: The obligation description to match against.
            regulation_id: Source regulation (for domain bonus).
            top_n: Number of top results to consider for composability.

        Returns:
            PatternMatchResult with the best match.
        """
        if not self._initialized:
            await self.initialize()

        if not obligation_text or not self._patterns:
            return PatternMatchResult()

        # Tier 1: Keyword matching
        keyword_result = self._tier1_keyword(obligation_text, regulation_id)

        # Tier 2: Embedding matching
        embedding_result = await self._tier2_embedding(obligation_text, regulation_id)

        # Combine scores: prefer keyword match, boost with embedding if available
        best = self._combine_results(keyword_result, embedding_result)

        # Attach composable patterns
        if best.pattern:
            best.composable_patterns = [
                pid for pid in best.pattern.composable_with
                if pid in self._by_id
            ]

        return best

    async def match_top_n(
        self,
        obligation_text: str,
        regulation_id: Optional[str] = None,
        n: int = 3,
    ) -> list[PatternMatchResult]:
        """Return top-N pattern matches sorted by confidence descending."""
        if not self._initialized:
            await self.initialize()

        if not obligation_text or not self._patterns:
            return []

        keyword_scores = self._keyword_scores(obligation_text, regulation_id)
        embedding_scores = await self._embedding_scores(obligation_text, regulation_id)

        # Merge scores
        all_pattern_ids = set(keyword_scores.keys()) | set(embedding_scores.keys())
        results: list[PatternMatchResult] = []

        for pid in all_pattern_ids:
            pattern = self._by_id.get(pid)
            if not pattern:
                continue

            kw_score = keyword_scores.get(pid, (0, 0, 0.0))      # (hits, total, score)
            emb_score = embedding_scores.get(pid, (0.0, False))  # (score, bonus_applied)

            kw_hits, kw_total, kw_confidence = kw_score
            emb_confidence, bonus_applied = emb_score

            # Combined confidence: max of keyword and embedding, with boost if both
            if kw_confidence > 0 and emb_confidence > 0:
                combined = max(kw_confidence, emb_confidence) + 0.05
                method = "combined"
            elif kw_confidence > 0:
                combined = kw_confidence
                method = "keyword"
            else:
                combined = emb_confidence
                method = "embedding"

            results.append(PatternMatchResult(
                pattern=pattern,
                pattern_id=pid,
                method=method,
                confidence=min(combined, 1.0),
                keyword_hits=kw_hits,
                total_keywords=kw_total,
                embedding_score=emb_confidence,
                domain_bonus_applied=bonus_applied,
                composable_patterns=[
                    p for p in pattern.composable_with if p in self._by_id
                ],
            ))

        # Sort by confidence descending
        results.sort(key=lambda r: r.confidence, reverse=True)
        return results[:n]

    # -----------------------------------------------------------------------
    # Tier 1: Keyword Match
    # -----------------------------------------------------------------------

    def _tier1_keyword(
        self, obligation_text: str, regulation_id: Optional[str]
    ) -> Optional[PatternMatchResult]:
        """Match by counting keyword hits in the obligation text."""
        scores = self._keyword_scores(obligation_text, regulation_id)
        if not scores:
            return None

        # Find best match
        best_pid = max(scores, key=lambda pid: scores[pid][2])
        hits, total, confidence = scores[best_pid]

        if hits < KEYWORD_MATCH_MIN_HITS:
            return None

        pattern = self._by_id.get(best_pid)
        if not pattern:
            return None

        # Check domain bonus
        bonus_applied = False
        if regulation_id and self._domain_matches(pattern.domain, regulation_id):
            confidence = min(confidence + DOMAIN_BONUS, 1.0)
            bonus_applied = True

        return PatternMatchResult(
            pattern=pattern,
            pattern_id=best_pid,
            method="keyword",
            confidence=confidence,
            keyword_hits=hits,
            total_keywords=total,
            domain_bonus_applied=bonus_applied,
        )

    def _keyword_scores(
        self, text: str, regulation_id: Optional[str]
    ) -> dict[str, tuple[int, int, float]]:
        """Compute keyword match scores for all patterns.

        Returns dict: pattern_id → (hits, total_keywords, confidence).
        """
        text_lower = text.lower()
        hits_by_pattern: dict[str, int] = {}

        for keyword, pattern_ids in self._keyword_index.items():
            if keyword in text_lower:
                for pid in pattern_ids:
                    hits_by_pattern[pid] = hits_by_pattern.get(pid, 0) + 1

        result: dict[str, tuple[int, int, float]] = {}
        for pid, hits in hits_by_pattern.items():
            pattern = self._by_id.get(pid)
            if not pattern:
                continue
            total = len(pattern.obligation_match_keywords)
            confidence = hits / total if total > 0 else 0.0
            result[pid] = (hits, total, confidence)

        return result

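    # Worked arithmetic for the scores above (hypothetical pattern with 5
    # obligation_match_keywords, 3 of which occur in the obligation text):
    #
    #     hits, total = 3, 5
    #     confidence = hits / total                          # 0.60
    #     confidence = min(confidence + DOMAIN_BONUS, 1.0)   # 0.70 with affinity
    #
    # hits >= KEYWORD_MATCH_MIN_HITS, so this would be accepted by Tier 1.
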
    # -----------------------------------------------------------------------
    # Tier 2: Embedding Match
    # -----------------------------------------------------------------------

    async def _tier2_embedding(
        self, obligation_text: str, regulation_id: Optional[str]
    ) -> Optional[PatternMatchResult]:
        """Match by embedding similarity against pattern objective_templates."""
        scores = await self._embedding_scores(obligation_text, regulation_id)
        if not scores:
            return None

        best_pid = max(scores, key=lambda pid: scores[pid][0])
        emb_score, bonus_applied = scores[best_pid]

        if emb_score < EMBEDDING_PATTERN_THRESHOLD:
            return None

        pattern = self._by_id.get(best_pid)
        if not pattern:
            return None

        return PatternMatchResult(
            pattern=pattern,
            pattern_id=best_pid,
            method="embedding",
            confidence=min(emb_score, 1.0),
            embedding_score=emb_score,
            domain_bonus_applied=bonus_applied,
        )

    async def _embedding_scores(
        self, obligation_text: str, regulation_id: Optional[str]
    ) -> dict[str, tuple[float, bool]]:
        """Compute embedding similarity scores for all patterns.

        Returns dict: pattern_id → (score, domain_bonus_applied).
        """
        if not self._pattern_embeddings:
            return {}

        chunk_embedding = await _get_embedding(obligation_text[:2000])
        if not chunk_embedding:
            return {}

        result: dict[str, tuple[float, bool]] = {}
        for i, pat_emb in enumerate(self._pattern_embeddings):
            if not pat_emb:
                continue
            pid = self._pattern_ids[i]
            pattern = self._by_id.get(pid)
            if not pattern:
                continue

            score = _cosine_sim(chunk_embedding, pat_emb)

            # Domain bonus
            bonus_applied = False
            if regulation_id and self._domain_matches(pattern.domain, regulation_id):
                score += DOMAIN_BONUS
                bonus_applied = True

            result[pid] = (score, bonus_applied)

        return result

    # -----------------------------------------------------------------------
    # Score combination
    # -----------------------------------------------------------------------

    def _combine_results(
        self,
        keyword_result: Optional[PatternMatchResult],
        embedding_result: Optional[PatternMatchResult],
    ) -> PatternMatchResult:
        """Combine keyword and embedding results into the best match."""
        if not keyword_result and not embedding_result:
            return PatternMatchResult()

        if not keyword_result:
            return embedding_result
        if not embedding_result:
            return keyword_result

        # Both matched — check if they agree
        if keyword_result.pattern_id == embedding_result.pattern_id:
            # Same pattern: boost confidence
            combined_confidence = min(
                max(keyword_result.confidence, embedding_result.confidence) + 0.05,
                1.0,
            )
            return PatternMatchResult(
                pattern=keyword_result.pattern,
                pattern_id=keyword_result.pattern_id,
                method="combined",
                confidence=combined_confidence,
                keyword_hits=keyword_result.keyword_hits,
                total_keywords=keyword_result.total_keywords,
                embedding_score=embedding_result.embedding_score,
                domain_bonus_applied=(
                    keyword_result.domain_bonus_applied
                    or embedding_result.domain_bonus_applied
                ),
            )

        # Different patterns: pick the one with higher confidence
        if keyword_result.confidence >= embedding_result.confidence:
            return keyword_result
        return embedding_result

    # -----------------------------------------------------------------------
    # Domain affinity
    # -----------------------------------------------------------------------

    @staticmethod
    def _domain_matches(pattern_domain: str, regulation_id: str) -> bool:
        """Check if a pattern's domain has affinity with a regulation."""
        affine_domains = _REGULATION_DOMAIN_AFFINITY.get(regulation_id, [])
        return pattern_domain in affine_domains

    # -----------------------------------------------------------------------
    # Initialization helpers
    # -----------------------------------------------------------------------

    def _load_patterns(self) -> None:
        """Load control patterns from YAML files."""
        patterns_dir = _find_patterns_dir()
        if not patterns_dir:
            logger.warning("Control patterns directory not found")
            return

        for yaml_file in sorted(patterns_dir.glob("*.yaml")):
            if yaml_file.name.startswith("_"):
                continue
            try:
                with open(yaml_file) as f:
                    data = yaml.safe_load(f)
                if not data or "patterns" not in data:
                    continue
                for p in data["patterns"]:
                    pattern = ControlPattern(
                        id=p["id"],
                        name=p["name"],
                        name_de=p["name_de"],
                        domain=p["domain"],
                        category=p["category"],
                        description=p["description"],
                        objective_template=p["objective_template"],
                        rationale_template=p["rationale_template"],
                        requirements_template=p.get("requirements_template", []),
                        test_procedure_template=p.get("test_procedure_template", []),
                        evidence_template=p.get("evidence_template", []),
                        severity_default=p.get("severity_default", "medium"),
                        implementation_effort_default=p.get("implementation_effort_default", "m"),
                        obligation_match_keywords=p.get("obligation_match_keywords", []),
                        tags=p.get("tags", []),
                        composable_with=p.get("composable_with", []),
                        open_anchor_refs=p.get("open_anchor_refs", []),
                    )
                    self._patterns.append(pattern)
                    self._by_id[pattern.id] = pattern
                    domain_list = self._by_domain.setdefault(pattern.domain, [])
                    domain_list.append(pattern)
            except Exception as e:
                logger.error("Failed to load %s: %s", yaml_file.name, e)

        logger.info("Loaded %d patterns from %s", len(self._patterns), patterns_dir)

    def _build_keyword_index(self) -> None:
        """Build reverse index: keyword → [pattern_ids]."""
        for pattern in self._patterns:
            for kw in pattern.obligation_match_keywords:
                lower_kw = kw.lower()
                if lower_kw not in self._keyword_index:
                    self._keyword_index[lower_kw] = []
                self._keyword_index[lower_kw].append(pattern.id)

    async def _compute_embeddings(self) -> None:
        """Compute embeddings for all pattern objective templates."""
        if not self._patterns:
            return

        self._pattern_ids = [p.id for p in self._patterns]
        texts = [
            f"{p.name_de}: {p.objective_template}"
            for p in self._patterns
        ]

        logger.info("Computing embeddings for %d patterns...", len(texts))
        self._pattern_embeddings = await _get_embeddings_batch(texts)
        valid = sum(1 for e in self._pattern_embeddings if e)
        logger.info("Got %d/%d valid pattern embeddings", valid, len(texts))

    # -----------------------------------------------------------------------
    # Public helpers
    # -----------------------------------------------------------------------

    def get_pattern(self, pattern_id: str) -> Optional[ControlPattern]:
        """Get a pattern by its ID."""
        return self._by_id.get(pattern_id.upper())

    def get_patterns_by_domain(self, domain: str) -> list[ControlPattern]:
        """Get all patterns for a domain."""
        return self._by_domain.get(domain.upper(), [])

    def stats(self) -> dict:
        """Return matcher statistics."""
        return {
            "total_patterns": len(self._patterns),
            "domains": list(self._by_domain.keys()),
            "keywords": len(self._keyword_index),
            "embeddings_valid": sum(1 for e in self._pattern_embeddings if e),
            "initialized": self._initialized,
        }


def _find_patterns_dir() -> Optional[Path]:
    """Locate the control_patterns directory."""
    candidates = [
        Path(__file__).resolve().parent.parent.parent.parent
        / "ai-compliance-sdk" / "policies" / "control_patterns",
        Path("/app/ai-compliance-sdk/policies/control_patterns"),
        Path("ai-compliance-sdk/policies/control_patterns"),
    ]
    for p in candidates:
        if p.is_dir():
            return p
    return None


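# Illustrative end-to-end check; assumes the embedding service from
# obligation_extractor is reachable, and degrades to keyword-only matching
# when it is not (since _get_embedding fails soft with an empty vector):
if __name__ == "__main__":
    import asyncio

    async def _demo():
        matcher = PatternMatcher()
        await matcher.initialize()
        result = await matcher.match(
            obligation_text="Führung eines Verzeichnisses von Verarbeitungstätigkeiten",
            regulation_id="dsgvo",
        )
        print(result.to_dict())

    asyncio.run(_demo())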
670
control-pipeline/services/pipeline_adapter.py
Normal file
@@ -0,0 +1,670 @@
|
||||
"""Pipeline Adapter — New 10-Stage Pipeline Integration.
|
||||
|
||||
Bridges the existing 7-stage control_generator pipeline with the new
|
||||
multi-layer components (ObligationExtractor, PatternMatcher, ControlComposer).
|
||||
|
||||
New pipeline flow:
|
||||
chunk → license_classify
|
||||
→ obligation_extract (Stage 4 — NEW)
|
||||
→ pattern_match (Stage 5 — NEW)
|
||||
→ control_compose (Stage 6 — replaces old Stage 3)
|
||||
→ harmonize → anchor → store + crosswalk → mark processed
|
||||
|
||||
Can be used in two modes:
|
||||
1. INLINE: Called from _process_batch() to enrich the pipeline
|
||||
2. STANDALONE: Process chunks directly through new stages
|
||||
|
||||
Part of the Multi-Layer Control Architecture (Phase 7 of 8).
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
from sqlalchemy import text
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from services.control_composer import ComposedControl, ControlComposer
|
||||
from services.obligation_extractor import ObligationExtractor, ObligationMatch
|
||||
from services.pattern_matcher import PatternMatcher, PatternMatchResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class PipelineChunk:
    """Input chunk for the new pipeline stages."""

    text: str
    collection: str = ""
    regulation_code: str = ""
    article: Optional[str] = None
    paragraph: Optional[str] = None
    license_rule: int = 3
    license_info: dict = field(default_factory=dict)
    source_citation: Optional[dict] = None
    chunk_hash: str = ""

    def compute_hash(self) -> str:
        if not self.chunk_hash:
            self.chunk_hash = hashlib.sha256(self.text.encode()).hexdigest()
        return self.chunk_hash


@dataclass
class PipelineResult:
    """Result of processing a chunk through the new pipeline."""

    chunk: PipelineChunk
    obligation: ObligationMatch = field(default_factory=ObligationMatch)
    pattern_result: PatternMatchResult = field(default_factory=PatternMatchResult)
    control: Optional[ComposedControl] = None
    crosswalk_written: bool = False
    error: Optional[str] = None

    def to_dict(self) -> dict:
        return {
            "chunk_hash": self.chunk.chunk_hash,
            "obligation": self.obligation.to_dict() if self.obligation else None,
            "pattern": self.pattern_result.to_dict() if self.pattern_result else None,
            "control": self.control.to_dict() if self.control else None,
            "crosswalk_written": self.crosswalk_written,
            "error": self.error,
        }


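A quick sketch of how a chunk is addressed: the hash is derived from the text alone, so recomputing it is idempotent.

chunk = PipelineChunk(
    text="Der Verantwortliche führt ein Verzeichnis aller Verarbeitungstätigkeiten ...",
    regulation_code="eu_2016_679",
    article="Art. 30",
)
assert chunk.compute_hash() == chunk.compute_hash()
assert len(chunk.chunk_hash) == 64  # hex-encoded SHA-256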
class PipelineAdapter:
    """Integrates ObligationExtractor + PatternMatcher + ControlComposer.

    Usage::

        adapter = PipelineAdapter(db)
        await adapter.initialize()

        result = await adapter.process_chunk(PipelineChunk(
            text="...",
            regulation_code="eu_2016_679",
            article="Art. 30",
            license_rule=1,
        ))
    """

    def __init__(self, db: Optional[Session] = None):
        self.db = db
        self._extractor = ObligationExtractor()
        self._matcher = PatternMatcher()
        self._composer = ControlComposer()
        self._initialized = False

    async def initialize(self) -> None:
        """Initialize all sub-components."""
        if self._initialized:
            return
        await self._extractor.initialize()
        await self._matcher.initialize()
        self._initialized = True
        logger.info("PipelineAdapter initialized")

    async def process_chunk(self, chunk: PipelineChunk) -> PipelineResult:
        """Process a single chunk through the new 3-stage pipeline.

        Stage 4: Obligation Extract
        Stage 5: Pattern Match
        Stage 6: Control Compose
        """
        if not self._initialized:
            await self.initialize()

        chunk.compute_hash()
        result = PipelineResult(chunk=chunk)

        try:
            # Stage 4: Obligation Extract
            result.obligation = await self._extractor.extract(
                chunk_text=chunk.text,
                regulation_code=chunk.regulation_code,
                article=chunk.article,
                paragraph=chunk.paragraph,
            )

            # Stage 5: Pattern Match
            obligation_text = (
                result.obligation.obligation_text
                or result.obligation.obligation_title
                or chunk.text[:500]
            )
            result.pattern_result = await self._matcher.match(
                obligation_text=obligation_text,
                regulation_id=result.obligation.regulation_id,
            )

            # Stage 6: Control Compose
            result.control = await self._composer.compose(
                obligation=result.obligation,
                pattern_result=result.pattern_result,
                chunk_text=chunk.text if chunk.license_rule in (1, 2) else None,
                license_rule=chunk.license_rule,
                source_citation=chunk.source_citation,
                regulation_code=chunk.regulation_code,
            )

        except Exception as e:
            logger.error("Pipeline processing failed: %s", e)
            result.error = str(e)

        return result

    async def process_batch(self, chunks: list[PipelineChunk]) -> list[PipelineResult]:
        """Process multiple chunks through the pipeline."""
        results = []
        for chunk in chunks:
            result = await self.process_chunk(chunk)
            results.append(result)
        return results

    def write_crosswalk(self, result: PipelineResult, control_uuid: str) -> bool:
        """Write obligation_extraction + crosswalk_matrix rows for a processed chunk.

        Called AFTER the control is stored in canonical_controls.
        """
        if not self.db or not result.control:
            return False

        chunk = result.chunk
        obligation = result.obligation
        pattern = result.pattern_result

        try:
            # 1. Write obligation_extraction row
            self.db.execute(
                text("""
                    INSERT INTO obligation_extractions (
                        chunk_hash, collection, regulation_code,
                        article, paragraph, obligation_id,
                        obligation_text, confidence, extraction_method,
                        pattern_id, pattern_match_score, control_uuid
                    ) VALUES (
                        :chunk_hash, :collection, :regulation_code,
                        :article, :paragraph, :obligation_id,
                        :obligation_text, :confidence, :extraction_method,
                        :pattern_id, :pattern_match_score,
                        CAST(:control_uuid AS uuid)
                    )
                """),
                {
                    "chunk_hash": chunk.chunk_hash,
                    "collection": chunk.collection,
                    "regulation_code": chunk.regulation_code,
                    "article": chunk.article,
                    "paragraph": chunk.paragraph,
                    "obligation_id": obligation.obligation_id if obligation else None,
                    "obligation_text": (
                        obligation.obligation_text[:2000]
                        if obligation and obligation.obligation_text
                        else None
                    ),
                    "confidence": obligation.confidence if obligation else 0,
                    "extraction_method": obligation.method if obligation else "none",
                    "pattern_id": pattern.pattern_id if pattern else None,
                    "pattern_match_score": pattern.confidence if pattern else 0,
                    "control_uuid": control_uuid,
                },
            )

            # 2. Write crosswalk_matrix row
            self.db.execute(
                text("""
                    INSERT INTO crosswalk_matrix (
                        regulation_code, article, paragraph,
                        obligation_id, pattern_id,
                        master_control_id, master_control_uuid,
                        confidence, source
                    ) VALUES (
                        :regulation_code, :article, :paragraph,
                        :obligation_id, :pattern_id,
                        :master_control_id,
                        CAST(:master_control_uuid AS uuid),
                        :confidence, :source
                    )
                """),
                {
                    "regulation_code": chunk.regulation_code,
                    "article": chunk.article,
                    "paragraph": chunk.paragraph,
                    "obligation_id": obligation.obligation_id if obligation else None,
                    "pattern_id": pattern.pattern_id if pattern else None,
                    "master_control_id": result.control.control_id,
                    "master_control_uuid": control_uuid,
                    "confidence": min(
                        obligation.confidence if obligation else 0,
                        pattern.confidence if pattern else 0,
                    ),
                    "source": "auto",
                },
            )

            # 3. Update canonical_controls with pattern_id + obligation_ids
            if result.control.pattern_id or result.control.obligation_ids:
                self.db.execute(
                    text("""
                        UPDATE canonical_controls
                        SET pattern_id = COALESCE(:pattern_id, pattern_id),
                            obligation_ids = COALESCE(:obligation_ids, obligation_ids)
                        WHERE id = CAST(:control_uuid AS uuid)
                    """),
                    {
                        "pattern_id": result.control.pattern_id,
                        "obligation_ids": json.dumps(result.control.obligation_ids),
                        "control_uuid": control_uuid,
                    },
                )

            self.db.commit()
            result.crosswalk_written = True
            return True

        except Exception as e:
            logger.error("Failed to write crosswalk: %s", e)
            self.db.rollback()
            return False

    def stats(self) -> dict:
        """Return component statistics."""
        return {
            "extractor": self._extractor.stats(),
            "matcher": self._matcher.stats(),
            "initialized": self._initialized,
        }

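A hedged sketch of the intended call order; store_control is a hypothetical storage helper, since the canonical_controls insert itself is not part of this file, and db/chunk come from the caller.

adapter = PipelineAdapter(db)
await adapter.initialize()

result = await adapter.process_chunk(chunk)
if result.control and not result.error:
    control_uuid = store_control(db, result.control)  # hypothetical helper, returns the new row's uuid
    adapter.write_crosswalk(result, control_uuid)     # crosswalk rows need the stored uuid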
# ---------------------------------------------------------------------------
# Migration Passes — Backfill existing 4,800+ controls
# ---------------------------------------------------------------------------


class MigrationPasses:
    """Non-destructive migration passes for existing controls.

    Pass 1: Obligation Linkage (deterministic, article→obligation lookup)
    Pass 2: Pattern Classification (keyword-based matching)
    Pass 3: Quality Triage (categorize by linkage completeness)
    Pass 4: Crosswalk Backfill (write crosswalk rows for linked controls)
    Pass 5: Deduplication (mark duplicate controls)

    Usage::

        migration = MigrationPasses(db)
        await migration.initialize()

        result = await migration.run_pass1_obligation_linkage(limit=100)
        result = await migration.run_pass2_pattern_classification(limit=100)
        result = migration.run_pass3_quality_triage()
        result = migration.run_pass4_crosswalk_backfill()
        result = migration.run_pass5_deduplication()
    """

    def __init__(self, db: Session):
        self.db = db
        self._extractor = ObligationExtractor()
        self._matcher = PatternMatcher()
        self._initialized = False

    async def initialize(self) -> None:
        """Initialize extractors (loads obligations + patterns)."""
        if self._initialized:
            return
        self._extractor._load_obligations()
        self._matcher._load_patterns()
        self._matcher._build_keyword_index()
        self._initialized = True

    # -------------------------------------------------------------------
    # Pass 1: Obligation Linkage (deterministic)
    # -------------------------------------------------------------------

    async def run_pass1_obligation_linkage(self, limit: int = 0) -> dict:
        """Link existing controls to obligations via source_citation article.

        For each control with source_citation → extract regulation + article
        → look up in obligation framework → set obligation_ids.
        """
        if not self._initialized:
            await self.initialize()

        query = """
            SELECT id, control_id, source_citation, generation_metadata
            FROM canonical_controls
            WHERE release_state NOT IN ('deprecated')
              AND (obligation_ids IS NULL OR obligation_ids = '[]')
        """
        if limit > 0:
            query += f" LIMIT {limit}"

        rows = self.db.execute(text(query)).fetchall()

        stats = {"total": len(rows), "linked": 0, "no_match": 0, "no_citation": 0}

        for row in rows:
            control_uuid = str(row[0])
            control_id = row[1]
            citation = row[2]
            metadata = row[3]

            # Extract regulation + article from citation or metadata
            reg_code, article = _extract_regulation_article(citation, metadata)
            if not reg_code:
                stats["no_citation"] += 1
                continue

            # Tier 1: Exact match
            match = self._extractor._tier1_exact(reg_code, article or "")
            if match and match.obligation_id:
                self.db.execute(
                    text("""
                        UPDATE canonical_controls
                        SET obligation_ids = :obl_ids
                        WHERE id = CAST(:uuid AS uuid)
                    """),
                    {
                        "obl_ids": json.dumps([match.obligation_id]),
                        "uuid": control_uuid,
                    },
                )
                stats["linked"] += 1
            else:
                stats["no_match"] += 1

        self.db.commit()
        logger.info("Pass 1: %s", stats)
        return stats

    # -------------------------------------------------------------------
    # Pass 2: Pattern Classification (keyword-based)
    # -------------------------------------------------------------------

    async def run_pass2_pattern_classification(self, limit: int = 0) -> dict:
        """Classify existing controls into patterns via keyword matching.

        For each control without pattern_id → keyword-match title+objective
        against pattern library → assign best match.
        """
        if not self._initialized:
            await self.initialize()

        query = """
            SELECT id, control_id, title, objective
            FROM canonical_controls
            WHERE release_state NOT IN ('deprecated')
              AND (pattern_id IS NULL OR pattern_id = '')
        """
        if limit > 0:
            query += f" LIMIT {limit}"

        rows = self.db.execute(text(query)).fetchall()

        stats = {"total": len(rows), "classified": 0, "no_match": 0}

        for row in rows:
            control_uuid = str(row[0])
            title = row[2] or ""
            objective = row[3] or ""

            # Keyword match
            match_text = f"{title} {objective}"
            result = self._matcher._tier1_keyword(match_text, None)

            if result and result.pattern_id and result.keyword_hits >= 2:
                self.db.execute(
                    text("""
                        UPDATE canonical_controls
                        SET pattern_id = :pattern_id
                        WHERE id = CAST(:uuid AS uuid)
                    """),
                    {
                        "pattern_id": result.pattern_id,
                        "uuid": control_uuid,
                    },
                )
                stats["classified"] += 1
            else:
                stats["no_match"] += 1

        self.db.commit()
        logger.info("Pass 2: %s", stats)
        return stats

    # -------------------------------------------------------------------
    # Pass 3: Quality Triage
    # -------------------------------------------------------------------

    def run_pass3_quality_triage(self) -> dict:
        """Categorize controls by linkage completeness.

        Sets generation_metadata.triage_status:
        - "review": has both obligation_id + pattern_id
        - "needs_obligation": has pattern_id but no obligation_id
        - "needs_pattern": has obligation_id but no pattern_id
        - "legacy_unlinked": has neither
        """
        categories = {
            "review": """
                UPDATE canonical_controls
                SET generation_metadata = jsonb_set(
                    COALESCE(generation_metadata::jsonb, '{}'::jsonb),
                    '{triage_status}', '"review"'
                )
                WHERE release_state NOT IN ('deprecated')
                  AND obligation_ids IS NOT NULL AND obligation_ids != '[]'
                  AND pattern_id IS NOT NULL AND pattern_id != ''
            """,
            "needs_obligation": """
                UPDATE canonical_controls
                SET generation_metadata = jsonb_set(
                    COALESCE(generation_metadata::jsonb, '{}'::jsonb),
                    '{triage_status}', '"needs_obligation"'
                )
                WHERE release_state NOT IN ('deprecated')
                  AND (obligation_ids IS NULL OR obligation_ids = '[]')
                  AND pattern_id IS NOT NULL AND pattern_id != ''
            """,
            "needs_pattern": """
                UPDATE canonical_controls
                SET generation_metadata = jsonb_set(
                    COALESCE(generation_metadata::jsonb, '{}'::jsonb),
                    '{triage_status}', '"needs_pattern"'
                )
                WHERE release_state NOT IN ('deprecated')
                  AND obligation_ids IS NOT NULL AND obligation_ids != '[]'
                  AND (pattern_id IS NULL OR pattern_id = '')
            """,
            "legacy_unlinked": """
                UPDATE canonical_controls
                SET generation_metadata = jsonb_set(
                    COALESCE(generation_metadata::jsonb, '{}'::jsonb),
                    '{triage_status}', '"legacy_unlinked"'
                )
                WHERE release_state NOT IN ('deprecated')
                  AND (obligation_ids IS NULL OR obligation_ids = '[]')
                  AND (pattern_id IS NULL OR pattern_id = '')
            """,
        }

        stats = {}
        for category, sql in categories.items():
            result = self.db.execute(text(sql))
            stats[category] = result.rowcount

        self.db.commit()
        logger.info("Pass 3: %s", stats)
        return stats

    # -------------------------------------------------------------------
    # Pass 4: Crosswalk Backfill
    # -------------------------------------------------------------------

    def run_pass4_crosswalk_backfill(self) -> dict:
        """Create crosswalk_matrix rows for controls with obligation + pattern.

        Only creates rows that don't already exist.
        """
        result = self.db.execute(text("""
            INSERT INTO crosswalk_matrix (
                regulation_code, obligation_id, pattern_id,
                master_control_id, master_control_uuid,
                confidence, source
            )
            SELECT
                COALESCE(
                    (generation_metadata::jsonb->>'source_regulation'),
                    ''
                ) AS regulation_code,
                obl.value::text AS obligation_id,
                cc.pattern_id,
                cc.control_id,
                cc.id,
                0.80,
                'migrated'
            FROM canonical_controls cc,
                 jsonb_array_elements_text(
                     COALESCE(cc.obligation_ids::jsonb, '[]'::jsonb)
                 ) AS obl(value)
            WHERE cc.release_state NOT IN ('deprecated')
              AND cc.pattern_id IS NOT NULL AND cc.pattern_id != ''
              AND cc.obligation_ids IS NOT NULL AND cc.obligation_ids != '[]'
              AND NOT EXISTS (
                  SELECT 1 FROM crosswalk_matrix cw
                  WHERE cw.master_control_uuid = cc.id
                    AND cw.obligation_id = obl.value::text
              )
        """))

        rows_inserted = result.rowcount
        self.db.commit()
        logger.info("Pass 4: %d crosswalk rows inserted", rows_inserted)
        return {"rows_inserted": rows_inserted}

    # -------------------------------------------------------------------
    # Pass 5: Deduplication
    # -------------------------------------------------------------------

    def run_pass5_deduplication(self) -> dict:
        """Mark duplicate controls (same obligation + same pattern).

        Groups controls by (obligation_id, pattern_id), keeps the one with
        highest evidence_confidence (or newest), marks rest as deprecated.
        """
        # Find groups with duplicates
        groups = self.db.execute(text("""
            SELECT cc.pattern_id,
                   obl.value::text AS obligation_id,
                   array_agg(cc.id ORDER BY cc.evidence_confidence DESC NULLS LAST, cc.created_at DESC) AS ids,
                   count(*) AS cnt
            FROM canonical_controls cc,
                 jsonb_array_elements_text(
                     COALESCE(cc.obligation_ids::jsonb, '[]'::jsonb)
                 ) AS obl(value)
            WHERE cc.release_state NOT IN ('deprecated')
              AND cc.pattern_id IS NOT NULL AND cc.pattern_id != ''
            GROUP BY cc.pattern_id, obl.value::text
            HAVING count(*) > 1
        """)).fetchall()

        stats = {"groups_found": len(groups), "controls_deprecated": 0}

        for group in groups:
            ids = group[2]  # Array of UUIDs, first is the keeper
            if len(ids) <= 1:
                continue

            # Keep first (highest confidence), deprecate rest
            deprecate_ids = ids[1:]
            for dep_id in deprecate_ids:
                self.db.execute(
                    text("""
                        UPDATE canonical_controls
                        SET release_state = 'deprecated',
                            generation_metadata = jsonb_set(
                                COALESCE(generation_metadata::jsonb, '{}'::jsonb),
                                '{deprecated_reason}', '"duplicate_same_obligation_pattern"'
                            )
                        WHERE id = CAST(:uuid AS uuid)
                          AND release_state != 'deprecated'
                    """),
                    {"uuid": str(dep_id)},
                )
                stats["controls_deprecated"] += 1

        self.db.commit()
        logger.info("Pass 5: %s", stats)
        return stats

    def migration_status(self) -> dict:
        """Return overall migration progress."""
        row = self.db.execute(text("""
            SELECT
                count(*) AS total,
                count(*) FILTER (WHERE obligation_ids IS NOT NULL AND obligation_ids != '[]') AS has_obligation,
                count(*) FILTER (WHERE pattern_id IS NOT NULL AND pattern_id != '') AS has_pattern,
                count(*) FILTER (
                    WHERE obligation_ids IS NOT NULL AND obligation_ids != '[]'
                      AND pattern_id IS NOT NULL AND pattern_id != ''
                ) AS fully_linked,
                count(*) FILTER (WHERE release_state = 'deprecated') AS deprecated
            FROM canonical_controls
        """)).fetchone()

        return {
            "total_controls": row[0],
            "has_obligation": row[1],
            "has_pattern": row[2],
            "fully_linked": row[3],
            "deprecated": row[4],
            "coverage_obligation_pct": round(row[1] / max(row[0], 1) * 100, 1),
            "coverage_pattern_pct": round(row[2] / max(row[0], 1) * 100, 1),
            "coverage_full_pct": round(row[3] / max(row[0], 1) * 100, 1),
        }

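A minimal driver sketch (the SessionLocal import path is assumed from this repo's db.session module, as used elsewhere in the diff); ordering matters, since triage, backfill, and dedup all read the linkage written by Passes 1 and 2.

import asyncio

from db.session import SessionLocal

async def run_all_passes() -> None:
    with SessionLocal() as db:
        migration = MigrationPasses(db)
        await migration.initialize()
        print("pass1", await migration.run_pass1_obligation_linkage())
        print("pass2", await migration.run_pass2_pattern_classification())
        print("pass3", migration.run_pass3_quality_triage())
        print("pass4", migration.run_pass4_crosswalk_backfill())
        print("pass5", migration.run_pass5_deduplication())
        print("status", migration.migration_status())

asyncio.run(run_all_passes())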
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def _extract_regulation_article(
    citation: Optional[str], metadata: Optional[str]
) -> tuple[Optional[str], Optional[str]]:
    """Extract regulation_code and article from control's citation/metadata."""
    from services.obligation_extractor import _normalize_regulation

    reg_code = None
    article = None

    # Try citation first (JSON string or dict)
    if citation:
        try:
            c = json.loads(citation) if isinstance(citation, str) else citation
            if isinstance(c, dict):
                article = c.get("article") or c.get("source_article")
                # Try to get regulation from source field
                source = c.get("source", "")
                if source:
                    reg_code = _normalize_regulation(source)
        except (json.JSONDecodeError, TypeError):
            pass

    # Try metadata
    if metadata and not reg_code:
        try:
            m = json.loads(metadata) if isinstance(metadata, str) else metadata
            if isinstance(m, dict):
                src_reg = m.get("source_regulation", "")
                if src_reg:
                    reg_code = _normalize_regulation(src_reg)
                if not article:
                    article = m.get("source_article")
        except (json.JSONDecodeError, TypeError):
            pass

    return reg_code, article
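Illustration only; the citation payload shape is inferred from the parsing logic above, and the normalized code depends on _normalize_regulation.

citation = '{"source": "DSGVO", "article": "Art. 30"}'
reg_code, article = _extract_regulation_article(citation, None)
# article == "Art. 30"; reg_code is whatever _normalize_regulation("DSGVO") returns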
control-pipeline/services/rag_client.py (new file, 213 lines)
@@ -0,0 +1,213 @@
"""
|
||||
Compliance RAG Client — Proxy to Go SDK RAG Search.
|
||||
|
||||
Lightweight HTTP client that queries the Go AI Compliance SDK's
|
||||
POST /sdk/v1/rag/search endpoint. This avoids needing embedding
|
||||
models or direct Qdrant access in Python.
|
||||
|
||||
Error-tolerant: RAG failures never break the calling function.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
SDK_URL = os.getenv("SDK_URL", "http://ai-compliance-sdk:8090")
|
||||
RAG_SEARCH_TIMEOUT = 15.0 # seconds
|
||||
|
||||
|
||||
@dataclass
|
||||
class RAGSearchResult:
|
||||
"""A single search result from the compliance corpus."""
|
||||
text: str
|
||||
regulation_code: str
|
||||
regulation_name: str
|
||||
regulation_short: str
|
||||
category: str
|
||||
article: str
|
||||
paragraph: str
|
||||
source_url: str
|
||||
score: float
|
||||
collection: str = ""
|
||||
|
||||
|
||||
class ComplianceRAGClient:
|
||||
"""
|
||||
RAG client that proxies search requests to the Go SDK.
|
||||
|
||||
Usage:
|
||||
client = get_rag_client()
|
||||
results = await client.search("DSGVO Art. 35", collection="bp_compliance_recht")
|
||||
context_str = client.format_for_prompt(results)
|
||||
"""
|
||||
|
||||
def __init__(self, base_url: str = SDK_URL):
|
||||
self._search_url = f"{base_url}/sdk/v1/rag/search"
|
||||
|
||||
    async def search(
        self,
        query: str,
        collection: str = "bp_compliance_ce",
        regulations: Optional[List[str]] = None,
        top_k: int = 5,
    ) -> List[RAGSearchResult]:
        """
        Search the RAG corpus via Go SDK.

        Returns an empty list on any error (never raises).
        """
        payload = {
            "query": query,
            "collection": collection,
            "top_k": top_k,
        }
        if regulations:
            payload["regulations"] = regulations

        try:
            async with httpx.AsyncClient(timeout=RAG_SEARCH_TIMEOUT) as client:
                resp = await client.post(self._search_url, json=payload)

                if resp.status_code != 200:
                    logger.warning(
                        "RAG search returned %d: %s", resp.status_code, resp.text[:200]
                    )
                    return []

                data = resp.json()
                results = []
                for r in data.get("results", []):
                    results.append(RAGSearchResult(
                        text=r.get("text", ""),
                        regulation_code=r.get("regulation_code", ""),
                        regulation_name=r.get("regulation_name", ""),
                        regulation_short=r.get("regulation_short", ""),
                        category=r.get("category", ""),
                        article=r.get("article", ""),
                        paragraph=r.get("paragraph", ""),
                        source_url=r.get("source_url", ""),
                        score=r.get("score", 0.0),
                        collection=collection,
                    ))
                return results

        except Exception as e:
            logger.warning("RAG search failed: %s", e)
            return []

    async def search_with_rerank(
        self,
        query: str,
        collection: str = "bp_compliance_ce",
        regulations: Optional[List[str]] = None,
        top_k: int = 5,
    ) -> List[RAGSearchResult]:
        """
        Search with optional cross-encoder re-ranking.

        Fetches top_k*4 results from RAG, then re-ranks with cross-encoder
        and returns top_k. Falls back to regular search if reranker is disabled.
        """
        from .reranker import get_reranker

        reranker = get_reranker()
        if reranker is None:
            return await self.search(query, collection, regulations, top_k)

        # Fetch more candidates for re-ranking
        candidates = await self.search(
            query, collection, regulations, top_k=max(top_k * 4, 20)
        )
        if not candidates:
            return []

        texts = [c.text for c in candidates]
        try:
            ranked_indices = reranker.rerank(query, texts, top_k=top_k)
            return [candidates[i] for i in ranked_indices]
        except Exception as e:
            logger.warning("Reranking failed, returning unranked: %s", e)
            return candidates[:top_k]

    async def scroll(
        self,
        collection: str,
        offset: Optional[str] = None,
        limit: int = 100,
    ) -> tuple[List[RAGSearchResult], Optional[str]]:
        """
        Scroll through ALL chunks in a collection (paginated).

        Returns (chunks, next_offset). next_offset is None when done.
        """
        scroll_url = self._search_url.replace("/search", "/scroll")
        params = {"collection": collection, "limit": str(limit)}
        if offset:
            params["offset"] = offset

        try:
            async with httpx.AsyncClient(timeout=30.0) as client:
                resp = await client.get(scroll_url, params=params)

                if resp.status_code != 200:
                    logger.warning(
                        "RAG scroll returned %d: %s", resp.status_code, resp.text[:200]
                    )
                    return [], None

                data = resp.json()
                results = []
                for r in data.get("chunks", []):
                    results.append(RAGSearchResult(
                        text=r.get("text", ""),
                        regulation_code=r.get("regulation_code", ""),
                        regulation_name=r.get("regulation_name", ""),
                        regulation_short=r.get("regulation_short", ""),
                        category=r.get("category", ""),
                        article=r.get("article", ""),
                        paragraph=r.get("paragraph", ""),
                        source_url=r.get("source_url", ""),
                        score=0.0,
                        collection=collection,
                    ))
                next_offset = data.get("next_offset") or None
                return results, next_offset

        except Exception as e:
            logger.warning("RAG scroll failed: %s", e)
            return [], None

    def format_for_prompt(
        self, results: List[RAGSearchResult], max_results: int = 5
    ) -> str:
        """Format search results as Markdown for inclusion in an LLM prompt."""
        if not results:
            return ""

        lines = ["## Relevanter Rechtskontext\n"]
        for i, r in enumerate(results[:max_results]):
            header = f"{i + 1}. **{r.regulation_short}** ({r.regulation_code})"
            if r.article:
                header += f" — {r.article}"
            lines.append(header)
            text = r.text[:400] + "..." if len(r.text) > 400 else r.text
            lines.append(f" > {text}\n")

        return "\n".join(lines)


# Singleton
_rag_client: Optional[ComplianceRAGClient] = None


def get_rag_client() -> ComplianceRAGClient:
    """Get the shared RAG client instance."""
    global _rag_client
    if _rag_client is None:
        _rag_client = ComplianceRAGClient()
    return _rag_client
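Usage sketch mirroring the class docstring; query and collection values are illustrative.

import asyncio

async def demo() -> None:
    client = get_rag_client()
    results = await client.search("DSGVO Art. 35", collection="bp_compliance_recht")
    print(client.format_for_prompt(results, max_results=3))

asyncio.run(demo())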
control-pipeline/services/reranker.py (new file, 85 lines)
@@ -0,0 +1,85 @@
"""
|
||||
Cross-Encoder Re-Ranking for RAG Search Results.
|
||||
|
||||
Uses BGE Reranker v2 (BAAI/bge-reranker-v2-m3, MIT license) to re-rank
|
||||
search results from Qdrant for improved retrieval quality.
|
||||
|
||||
Lazy-loads the model on first use. Disabled by default (RERANK_ENABLED=false).
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
RERANK_ENABLED = os.getenv("RERANK_ENABLED", "false").lower() == "true"
|
||||
RERANK_MODEL = os.getenv("RERANK_MODEL", "BAAI/bge-reranker-v2-m3")
|
||||
|
||||
|
||||
class Reranker:
|
||||
"""Cross-encoder reranker using sentence-transformers."""
|
||||
|
||||
def __init__(self, model_name: str = RERANK_MODEL):
|
||||
self._model = None # Lazy init
|
||||
self._model_name = model_name
|
||||
|
||||
def _ensure_model(self) -> None:
|
||||
"""Load model on first use."""
|
||||
if self._model is not None:
|
||||
return
|
||||
try:
|
||||
from sentence_transformers import CrossEncoder
|
||||
|
||||
logger.info("Loading reranker model: %s", self._model_name)
|
||||
self._model = CrossEncoder(self._model_name)
|
||||
logger.info("Reranker model loaded successfully")
|
||||
except ImportError:
|
||||
logger.error(
|
||||
"sentence-transformers not installed. "
|
||||
"Install with: pip install sentence-transformers"
|
||||
)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error("Failed to load reranker model: %s", e)
|
||||
raise
|
||||
|
||||
def rerank(
|
||||
self, query: str, texts: list[str], top_k: int = 5
|
||||
) -> list[int]:
|
||||
"""
|
||||
Return indices of top_k texts sorted by relevance (highest first).
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
texts: List of candidate texts to re-rank.
|
||||
top_k: Number of top results to return.
|
||||
|
||||
Returns:
|
||||
List of indices into the original texts list, sorted by relevance.
|
||||
"""
|
||||
if not texts:
|
||||
return []
|
||||
|
||||
self._ensure_model()
|
||||
|
||||
pairs = [[query, text] for text in texts]
|
||||
scores = self._model.predict(pairs)
|
||||
|
||||
# Sort by score descending, return indices
|
||||
ranked = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
|
||||
return ranked[:top_k]
|
||||
|
||||
|
||||
# Module-level singleton
|
||||
_reranker: Optional[Reranker] = None
|
||||
|
||||
|
||||
def get_reranker() -> Optional[Reranker]:
|
||||
"""Get the shared reranker instance. Returns None if disabled."""
|
||||
global _reranker
|
||||
if not RERANK_ENABLED:
|
||||
return None
|
||||
if _reranker is None:
|
||||
_reranker = Reranker()
|
||||
return _reranker
|
||||
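A short sketch of direct use: with RERANK_ENABLED=true the singleton is created, and the model downloads lazily on the first rerank call.

reranker = get_reranker()
if reranker is not None:
    docs = [
        "Verzeichnis von Verarbeitungstätigkeiten",
        "Netzwerksegmentierung",
        "Backup-Strategie",
    ]
    order = reranker.rerank("DSGVO Art. 30 Verzeichnis", docs, top_k=2)
    best = [docs[i] for i in order]  # two most relevant candidates, best first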
control-pipeline/services/similarity_detector.py (new file, 223 lines)
@@ -0,0 +1,223 @@
"""
|
||||
Too-Close Similarity Detector — checks whether a candidate text is too similar
|
||||
to a protected source text (copyright / license compliance).
|
||||
|
||||
Five metrics:
|
||||
1. Exact-phrase — longest identical token sequence
|
||||
2. Token overlap — Jaccard similarity of token sets
|
||||
3. 3-gram Jaccard — Jaccard similarity of character 3-grams
|
||||
4. Embedding cosine — via bge-m3 (Ollama or embedding-service)
|
||||
5. LCS ratio — Longest Common Subsequence / max(len_a, len_b)
|
||||
|
||||
Decision:
|
||||
PASS — no fail + max 1 warn
|
||||
WARN — max 2 warn, no fail → human review
|
||||
FAIL — any fail threshold → block, rewrite required
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Thresholds
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
THRESHOLDS = {
|
||||
"max_exact_run": {"warn": 8, "fail": 12},
|
||||
"token_overlap": {"warn": 0.20, "fail": 0.30},
|
||||
"ngram_jaccard": {"warn": 0.10, "fail": 0.18},
|
||||
"embedding_cosine": {"warn": 0.86, "fail": 0.92},
|
||||
"lcs_ratio": {"warn": 0.35, "fail": 0.50},
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
# Tokenisation helpers
# ---------------------------------------------------------------------------

_WORD_RE = re.compile(r"\w+", re.UNICODE)


def _tokenize(text: str) -> list[str]:
    return [t.lower() for t in _WORD_RE.findall(text)]


def _char_ngrams(text: str, n: int = 3) -> set[str]:
    text = text.lower()
    return {text[i : i + n] for i in range(len(text) - n + 1)} if len(text) >= n else set()


# ---------------------------------------------------------------------------
# Metric implementations
# ---------------------------------------------------------------------------

def max_exact_run(tokens_a: list[str], tokens_b: list[str]) -> int:
    """Longest contiguous identical token sequence between a and b."""
    if not tokens_a or not tokens_b:
        return 0

    best = 0
    set_b = set(tokens_b)

    for i in range(len(tokens_a)):
        if tokens_a[i] not in set_b:
            continue
        for j in range(len(tokens_b)):
            if tokens_a[i] != tokens_b[j]:
                continue
            run = 0
            ii, jj = i, j
            while ii < len(tokens_a) and jj < len(tokens_b) and tokens_a[ii] == tokens_b[jj]:
                run += 1
                ii += 1
                jj += 1
            if run > best:
                best = run
    return best


def token_overlap_jaccard(tokens_a: list[str], tokens_b: list[str]) -> float:
    """Jaccard similarity of token sets."""
    set_a, set_b = set(tokens_a), set(tokens_b)
    if not set_a and not set_b:
        return 0.0
    return len(set_a & set_b) / len(set_a | set_b)


def ngram_jaccard(text_a: str, text_b: str, n: int = 3) -> float:
    """Jaccard similarity of character n-grams."""
    grams_a = _char_ngrams(text_a, n)
    grams_b = _char_ngrams(text_b, n)
    if not grams_a and not grams_b:
        return 0.0
    return len(grams_a & grams_b) / len(grams_a | grams_b)


def lcs_ratio(tokens_a: list[str], tokens_b: list[str]) -> float:
    """LCS length / max(len_a, len_b)."""
    m, n = len(tokens_a), len(tokens_b)
    if m == 0 or n == 0:
        return 0.0

    # Space-optimised LCS (two rows)
    prev = [0] * (n + 1)
    curr = [0] * (n + 1)
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if tokens_a[i - 1] == tokens_b[j - 1]:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev, curr = curr, [0] * (n + 1)

    return prev[n] / max(m, n)


async def embedding_cosine(text_a: str, text_b: str, embedding_url: str | None = None) -> float:
    """Cosine similarity via embedding service (bge-m3).

    Falls back to 0.0 if the service is unreachable.
    """
    url = embedding_url or "http://embedding-service:8087"
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(
                f"{url}/embed",
                json={"texts": [text_a, text_b]},
            )
            resp.raise_for_status()
            embeddings = resp.json().get("embeddings", [])
            if len(embeddings) < 2:
                return 0.0
            return _cosine(embeddings[0], embeddings[1])
    except Exception:
        logger.warning("Embedding service unreachable, skipping cosine check")
        return 0.0


def _cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(x * x for x in b) ** 0.5
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot / (norm_a * norm_b)


# ---------------------------------------------------------------------------
# Decision engine
# ---------------------------------------------------------------------------

@dataclass
class SimilarityReport:
    max_exact_run: int
    token_overlap: float
    ngram_jaccard: float
    embedding_cosine: float
    lcs_ratio: float
    status: str  # PASS, WARN, FAIL
    details: dict  # per-metric status


def _classify(value: float | int, metric: str) -> str:
    t = THRESHOLDS[metric]
    if value >= t["fail"]:
        return "FAIL"
    if value >= t["warn"]:
        return "WARN"
    return "PASS"


async def check_similarity(
    source_text: str,
    candidate_text: str,
    embedding_url: str | None = None,
) -> SimilarityReport:
    """Run all 5 metrics and return an aggregate report."""
    tok_src = _tokenize(source_text)
    tok_cand = _tokenize(candidate_text)

    m_exact = max_exact_run(tok_src, tok_cand)
    m_token = token_overlap_jaccard(tok_src, tok_cand)
    m_ngram = ngram_jaccard(source_text, candidate_text)
    m_embed = await embedding_cosine(source_text, candidate_text, embedding_url)
    m_lcs = lcs_ratio(tok_src, tok_cand)

    details = {
        "max_exact_run": _classify(m_exact, "max_exact_run"),
        "token_overlap": _classify(m_token, "token_overlap"),
        "ngram_jaccard": _classify(m_ngram, "ngram_jaccard"),
        "embedding_cosine": _classify(m_embed, "embedding_cosine"),
        "lcs_ratio": _classify(m_lcs, "lcs_ratio"),
    }

    fail_count = sum(1 for v in details.values() if v == "FAIL")
    warn_count = sum(1 for v in details.values() if v == "WARN")

    if fail_count > 0 or warn_count > 2:
        status = "FAIL"
    elif warn_count == 2:
        status = "WARN"
    else:
        status = "PASS"

    return SimilarityReport(
        max_exact_run=m_exact,
        token_overlap=round(m_token, 4),
        ngram_jaccard=round(m_ngram, 4),
        embedding_cosine=round(m_embed, 4),
        lcs_ratio=round(m_lcs, 4),
        status=status,
        details=details,
    )
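Usage sketch: compare a protected source paragraph against a candidate rewrite; the embedding metric silently degrades to 0.0 when no service is reachable, so the check still completes offline.

import asyncio

report = asyncio.run(check_similarity(
    source_text="The controller shall maintain a record of processing activities.",
    candidate_text="Maintain an internal register documenting all processing activities.",
))
print(report.status, report.details)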
control-pipeline/services/v1_enrichment.py (new file, 331 lines)
@@ -0,0 +1,331 @@
"""V1 Control Enrichment Service — Match Eigenentwicklung controls to regulations.
|
||||
|
||||
Finds regulatory coverage for v1 controls (generation_strategy='ungrouped',
|
||||
pipeline_version=1, no source_citation) by embedding similarity search.
|
||||
|
||||
Reuses embedding + Qdrant helpers from control_dedup.py.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from sqlalchemy import text
|
||||
|
||||
from db.session import SessionLocal
|
||||
from services.control_dedup import (
|
||||
get_embedding,
|
||||
qdrant_search_cross_regulation,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Similarity threshold — lower than dedup (0.85) since we want informational matches
|
||||
# Typical top scores for v1 controls are 0.70-0.77
|
||||
V1_MATCH_THRESHOLD = 0.70
|
||||
V1_MAX_MATCHES = 5
|
||||
|
||||
|
||||
def _is_eigenentwicklung_query() -> str:
|
||||
"""SQL WHERE clause identifying v1 Eigenentwicklung controls."""
|
||||
return """
|
||||
generation_strategy = 'ungrouped'
|
||||
AND (pipeline_version = '1' OR pipeline_version IS NULL)
|
||||
AND source_citation IS NULL
|
||||
AND parent_control_uuid IS NULL
|
||||
AND release_state NOT IN ('rejected', 'merged', 'deprecated')
|
||||
"""
|
||||
|
||||
|
||||
async def count_v1_controls() -> int:
    """Count how many v1 Eigenentwicklung controls exist."""
    with SessionLocal() as db:
        row = db.execute(text(f"""
            SELECT COUNT(*) AS cnt
            FROM canonical_controls
            WHERE {_is_eigenentwicklung_query()}
        """)).fetchone()
        return row.cnt if row else 0


async def enrich_v1_matches(
    dry_run: bool = True,
    batch_size: int = 100,
    offset: int = 0,
) -> dict:
    """Find regulatory matches for v1 Eigenentwicklung controls.

    Args:
        dry_run: If True, only count — don't write matches.
        batch_size: Number of v1 controls to process per call.
        offset: Pagination offset (v1 control index).

    Returns:
        Stats dict with counts, sample matches, and pagination info.
    """
    with SessionLocal() as db:
        # 1. Load v1 controls (paginated)
        v1_controls = db.execute(text(f"""
            SELECT id, control_id, title, objective, category
            FROM canonical_controls
            WHERE {_is_eigenentwicklung_query()}
            ORDER BY control_id
            LIMIT :limit OFFSET :offset
        """), {"limit": batch_size, "offset": offset}).fetchall()

        # Count total for pagination
        total_row = db.execute(text(f"""
            SELECT COUNT(*) AS cnt
            FROM canonical_controls
            WHERE {_is_eigenentwicklung_query()}
        """)).fetchone()
        total_v1 = total_row.cnt if total_row else 0

        if not v1_controls:
            return {
                "dry_run": dry_run,
                "processed": 0,
                "total_v1": total_v1,
                "message": "Kein weiterer Batch — alle v1 Controls verarbeitet.",
            }

        if dry_run:
            return {
                "dry_run": True,
                "total_v1": total_v1,
                "offset": offset,
                "batch_size": batch_size,
                "sample_controls": [
                    {
                        "control_id": r.control_id,
                        "title": r.title,
                        "category": r.category,
                    }
                    for r in v1_controls[:20]
                ],
            }

        # 2. Process each v1 control
        processed = 0
        matches_inserted = 0
        errors = []
        sample_matches = []

        for v1 in v1_controls:
            try:
                # Build search text
                search_text = f"{v1.title} — {v1.objective}"

                # Get embedding
                embedding = await get_embedding(search_text)
                if not embedding:
                    errors.append({
                        "control_id": v1.control_id,
                        "error": "Embedding fehlgeschlagen",
                    })
                    continue

                # Search Qdrant (cross-regulation, no pattern filter)
                # Collection is atomic_controls_dedup (contains ~51k atomare Controls)
                results = await qdrant_search_cross_regulation(
                    embedding, top_k=20,
                    collection="atomic_controls_dedup",
                )

                # For each hit: resolve to a regulatory parent with source_citation.
                # Atomic controls in Qdrant usually have parent_control_uuid → parent
                # has the source_citation. We deduplicate by parent to avoid
                # listing the same regulation multiple times.
                rank = 0
                seen_parents: set[str] = set()

                for hit in results:
                    score = hit.get("score", 0)
                    if score < V1_MATCH_THRESHOLD:
                        continue

                    payload = hit.get("payload", {})
                    matched_uuid = payload.get("control_uuid")
                    if not matched_uuid or matched_uuid == str(v1.id):
                        continue

                    # Try the matched control itself first, then its parent
                    matched_row = db.execute(text("""
                        SELECT c.id, c.control_id, c.title, c.source_citation,
                               c.severity, c.category, c.parent_control_uuid
                        FROM canonical_controls c
                        WHERE c.id = CAST(:uuid AS uuid)
                    """), {"uuid": matched_uuid}).fetchone()

                    if not matched_row:
                        continue

                    # Resolve to regulatory control (one with source_citation)
                    reg_row = matched_row
                    if not reg_row.source_citation and reg_row.parent_control_uuid:
                        # Look up parent — the parent has the source_citation
                        parent_row = db.execute(text("""
                            SELECT id, control_id, title, source_citation,
                                   severity, category, parent_control_uuid
                            FROM canonical_controls
                            WHERE id = CAST(:uuid AS uuid)
                              AND source_citation IS NOT NULL
                        """), {"uuid": str(reg_row.parent_control_uuid)}).fetchone()
                        if parent_row:
                            reg_row = parent_row

                    if not reg_row.source_citation:
                        continue

                    # Deduplicate by parent UUID
                    parent_key = str(reg_row.id)
                    if parent_key in seen_parents:
                        continue
                    seen_parents.add(parent_key)

                    rank += 1
                    if rank > V1_MAX_MATCHES:
                        break

                    # Extract source info
                    source_citation = reg_row.source_citation or {}
                    matched_source = source_citation.get("source") if isinstance(source_citation, dict) else None
                    matched_article = source_citation.get("article") if isinstance(source_citation, dict) else None

                    # Insert match — link to the regulatory parent (not the atomic child)
                    db.execute(text("""
                        INSERT INTO v1_control_matches
                            (v1_control_uuid, matched_control_uuid, similarity_score,
                             match_rank, matched_source, matched_article, match_method)
                        VALUES
                            (CAST(:v1_uuid AS uuid), CAST(:matched_uuid AS uuid), :score,
                             :rank, :source, :article, 'embedding')
                        ON CONFLICT (v1_control_uuid, matched_control_uuid) DO UPDATE
                        SET similarity_score = EXCLUDED.similarity_score,
                            match_rank = EXCLUDED.match_rank
                    """), {
                        "v1_uuid": str(v1.id),
                        "matched_uuid": str(reg_row.id),
                        "score": round(score, 3),
                        "rank": rank,
                        "source": matched_source,
                        "article": matched_article,
                    })
                    matches_inserted += 1

                    # Collect sample
                    if len(sample_matches) < 20:
                        sample_matches.append({
                            "v1_control_id": v1.control_id,
                            "v1_title": v1.title,
                            "matched_control_id": reg_row.control_id,
                            "matched_title": reg_row.title,
                            "matched_source": matched_source,
                            "matched_article": matched_article,
                            "similarity_score": round(score, 3),
                            "match_rank": rank,
                        })

                processed += 1

            except Exception as e:
                logger.warning("V1 enrichment error for %s: %s", v1.control_id, e)
                errors.append({
                    "control_id": v1.control_id,
                    "error": str(e),
                })

        db.commit()

        # Pagination
        next_offset = offset + batch_size if len(v1_controls) == batch_size else None

        return {
            "dry_run": False,
            "offset": offset,
            "batch_size": batch_size,
            "next_offset": next_offset,
            "total_v1": total_v1,
            "processed": processed,
            "matches_inserted": matches_inserted,
            "errors": errors[:10],
            "sample_matches": sample_matches,
        }


async def get_v1_matches(control_uuid: str) -> list[dict]:
    """Get all regulatory matches for a specific v1 control.

    Args:
        control_uuid: The UUID of the v1 control.

    Returns:
        List of match dicts with control details.
    """
    with SessionLocal() as db:
        rows = db.execute(text("""
            SELECT
                m.similarity_score,
                m.match_rank,
                m.matched_source,
                m.matched_article,
                m.match_method,
                c.control_id AS matched_control_id,
                c.title AS matched_title,
                c.objective AS matched_objective,
                c.severity AS matched_severity,
                c.category AS matched_category,
                c.source_citation AS matched_source_citation
            FROM v1_control_matches m
            JOIN canonical_controls c ON c.id = m.matched_control_uuid
            WHERE m.v1_control_uuid = CAST(:uuid AS uuid)
            ORDER BY m.match_rank
        """), {"uuid": control_uuid}).fetchall()

        return [
            {
                "matched_control_id": r.matched_control_id,
                "matched_title": r.matched_title,
                "matched_objective": r.matched_objective,
                "matched_severity": r.matched_severity,
                "matched_category": r.matched_category,
                "matched_source": r.matched_source,
                "matched_article": r.matched_article,
                "matched_source_citation": r.matched_source_citation,
                "similarity_score": float(r.similarity_score),
                "match_rank": r.match_rank,
                "match_method": r.match_method,
            }
            for r in rows
        ]


async def get_v1_enrichment_stats() -> dict:
    """Get overview stats for v1 enrichment."""
    with SessionLocal() as db:
        total_v1 = db.execute(text(f"""
            SELECT COUNT(*) AS cnt FROM canonical_controls
            WHERE {_is_eigenentwicklung_query()}
        """)).fetchone()

        matched_v1 = db.execute(text(f"""
            SELECT COUNT(DISTINCT m.v1_control_uuid) AS cnt
            FROM v1_control_matches m
            JOIN canonical_controls c ON c.id = m.v1_control_uuid
            WHERE {_is_eigenentwicklung_query("c.")}
        """)).fetchone()

        total_matches = db.execute(text("""
            SELECT COUNT(*) AS cnt FROM v1_control_matches
        """)).fetchone()

        avg_score = db.execute(text("""
            SELECT AVG(similarity_score) AS avg_score FROM v1_control_matches
        """)).fetchone()

        return {
            "total_v1_controls": total_v1.cnt if total_v1 else 0,
            "v1_with_matches": matched_v1.cnt if matched_v1 else 0,
            "v1_without_matches": (total_v1.cnt if total_v1 else 0) - (matched_v1.cnt if matched_v1 else 0),
            "total_matches": total_matches.cnt if total_matches else 0,
            "avg_similarity_score": round(float(avg_score.avg_score), 3) if avg_score and avg_score.avg_score else None,
        }
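A paging driver sketch: inspect a batch with dry_run=True first, then walk next_offset until it is None.

import asyncio

async def enrich_all() -> None:
    preview = await enrich_v1_matches(dry_run=True)
    print(preview.get("total_v1"), "v1 controls to process")

    offset = 0
    while offset is not None:
        stats = await enrich_v1_matches(dry_run=False, batch_size=100, offset=offset)
        print(stats.get("processed"), "processed,", stats.get("matches_inserted"), "matches")
        offset = stats.get("next_offset")

asyncio.run(enrich_all())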
control-pipeline/tests/__init__.py (new file, 0 lines)

control-pipeline/tests/test_applicability_engine.py (new file, 229 lines)
@@ -0,0 +1,229 @@
"""
|
||||
Tests for the Applicability Engine (Phase C2).
|
||||
|
||||
Tests the deterministic filtering logic for industry, company size,
|
||||
and scope signals without requiring a database connection.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from services.applicability_engine import (
|
||||
_matches_company_size,
|
||||
_matches_industry,
|
||||
_matches_scope_signals,
|
||||
_parse_json_text,
|
||||
)
|
||||
|
||||
|
||||
# =============================================================================
# _parse_json_text
# =============================================================================


class TestParseJsonText:
    def test_none_returns_none(self):
        assert _parse_json_text(None) is None

    def test_valid_json_list(self):
        assert _parse_json_text('["all"]') == ["all"]

    def test_valid_json_list_multiple(self):
        result = _parse_json_text('["Telekommunikation", "Energie"]')
        assert result == ["Telekommunikation", "Energie"]

    def test_valid_json_dict(self):
        result = _parse_json_text('{"requires_any": ["uses_ai"]}')
        assert result == {"requires_any": ["uses_ai"]}

    def test_invalid_json_returns_none(self):
        assert _parse_json_text("not json") is None

    def test_empty_string_returns_none(self):
        assert _parse_json_text("") is None

    def test_already_list_passthrough(self):
        val = ["all"]
        assert _parse_json_text(val) == ["all"]

    def test_already_dict_passthrough(self):
        val = {"requires_any": ["uses_ai"]}
        assert _parse_json_text(val) == val

    def test_integer_returns_none(self):
        assert _parse_json_text(42) is None


# =============================================================================
# _matches_industry
# =============================================================================


class TestMatchesIndustry:
    def test_null_matches_any_industry(self):
        assert _matches_industry(None, "Telekommunikation") is True

    def test_all_matches_any_industry(self):
        assert _matches_industry('["all"]', "Telekommunikation") is True
        assert _matches_industry('["all"]', "Energie") is True

    def test_specific_industry_matches(self):
        assert _matches_industry(
            '["Telekommunikation", "Energie"]', "Telekommunikation"
        ) is True

    def test_specific_industry_no_match(self):
        assert _matches_industry(
            '["Telekommunikation", "Energie"]', "Gesundheitswesen"
        ) is False

    def test_malformed_json_matches(self):
        """Malformed data should be treated as 'applies to everyone'."""
        assert _matches_industry("not json", "anything") is True

    def test_all_with_other_industries(self):
        assert _matches_industry(
            '["all", "Telekommunikation"]', "Gesundheitswesen"
        ) is True


# =============================================================================
# _matches_company_size
# =============================================================================


class TestMatchesCompanySize:
    def test_null_matches_any_size(self):
        assert _matches_company_size(None, "medium") is True

    def test_all_matches_any_size(self):
        assert _matches_company_size('["all"]', "micro") is True
        assert _matches_company_size('["all"]', "enterprise") is True

    def test_specific_size_matches(self):
        assert _matches_company_size(
            '["medium", "large", "enterprise"]', "large"
        ) is True

    def test_specific_size_no_match(self):
        assert _matches_company_size(
            '["medium", "large", "enterprise"]', "small"
        ) is False

    def test_micro_excluded_from_nis2(self):
        """NIS2 typically requires medium+."""
        assert _matches_company_size(
            '["medium", "large", "enterprise"]', "micro"
        ) is False

    def test_malformed_json_matches(self):
        assert _matches_company_size("broken", "medium") is True


# =============================================================================
# _matches_scope_signals
# =============================================================================


class TestMatchesScopeSignals:
    def test_null_conditions_always_match(self):
        assert _matches_scope_signals(None, ["uses_ai"]) is True
        assert _matches_scope_signals(None, []) is True

    def test_empty_requires_any_matches(self):
        assert _matches_scope_signals('{"requires_any": []}', ["uses_ai"]) is True

    def test_no_requires_any_key_matches(self):
        assert _matches_scope_signals(
            '{"description": "some text"}', ["uses_ai"]
        ) is True

    def test_requires_any_with_matching_signal(self):
        conditions = '{"requires_any": ["uses_ai"], "description": "AI Act"}'
        assert _matches_scope_signals(conditions, ["uses_ai"]) is True

    def test_requires_any_with_no_matching_signal(self):
        conditions = '{"requires_any": ["uses_ai"], "description": "AI Act"}'
        assert _matches_scope_signals(
            conditions, ["third_country_transfer"]
        ) is False

    def test_requires_any_with_one_of_multiple_matching(self):
        conditions = '{"requires_any": ["uses_ai", "processes_health_data"]}'
        assert _matches_scope_signals(
            conditions, ["processes_health_data", "financial_data"]
        ) is True

    def test_requires_any_with_no_signals_provided(self):
        conditions = '{"requires_any": ["uses_ai"]}'
        assert _matches_scope_signals(conditions, []) is False

    def test_malformed_json_matches(self):
        assert _matches_scope_signals("broken", ["uses_ai"]) is True

    def test_multiple_required_signals_any_match(self):
        """requires_any means at least ONE must match."""
        conditions = (
            '{"requires_any": ["uses_ai", "third_country_transfer", '
            '"processes_health_data"]}'
        )
        assert _matches_scope_signals(
            conditions, ["third_country_transfer"]
        ) is True

    def test_multiple_required_signals_none_match(self):
        conditions = (
            '{"requires_any": ["uses_ai", "third_country_transfer"]}'
        )
        assert _matches_scope_signals(
            conditions, ["financial_data", "employee_monitoring"]
        ) is False


# =============================================================================
# Integration-style: combined filtering scenarios
# =============================================================================


class TestCombinedFiltering:
    """Test typical real-world filtering scenarios."""

    def test_dsgvo_art5_applies_to_everyone(self):
        """DSGVO Art. 5 = all industries, all sizes, no scope conditions."""
        assert _matches_industry('["all"]', "Telekommunikation") is True
        assert _matches_company_size('["all"]', "micro") is True
        assert _matches_scope_signals(None, []) is True

    def test_nis2_art21_kritis_medium_plus(self):
        """NIS2 Art. 21 = KRITIS sectors, medium+."""
        industries = '["Energie", "Gesundheitswesen", "Digitale Infrastruktur", "Logistik / Transport"]'
        sizes = '["medium", "large", "enterprise"]'

        # Matches: Energie + large
        assert _matches_industry(industries, "Energie") is True
        assert _matches_company_size(sizes, "large") is True

        # No match: IT company
        assert _matches_industry(industries, "Technologie / IT") is False

        # No match: small company
        assert _matches_company_size(sizes, "small") is False

    def test_ai_act_scope_condition(self):
        """AI Act = all industries, all sizes, but only if uses_ai."""
        conditions = '{"requires_any": ["uses_ai"], "description": "Nur bei KI-Einsatz"}'

        # Company uses AI
        assert _matches_scope_signals(conditions, ["uses_ai"]) is True

        # Company does not use AI
        assert _matches_scope_signals(conditions, []) is False
        assert _matches_scope_signals(
            conditions, ["third_country_transfer"]
        ) is False

    def test_tkg_telekom_only(self):
        """TKG = only Telekommunikation, all sizes."""
        industries = '["Telekommunikation"]'

        assert _matches_industry(industries, "Telekommunikation") is True
        assert _matches_industry(industries, "Energie") is False

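# NOTE (editorial sketch): taken together, the scenarios above treat the three
# predicates as a conjunctive filter -- a rule applies only when industry,
# company size, and scope signals all match. A hypothetical composition (field
# names invented, not part of the diff):

def rule_applies(rule, profile):
    """rule: row with industries/company_sizes/scope_conditions JSON columns;
    profile: dict with industry, company_size, and a scope-signal list."""
    return (
        _matches_industry(rule["industries"], profile["industry"])
        and _matches_company_size(rule["company_sizes"], profile["company_size"])
        and _matches_scope_signals(rule["scope_conditions"], profile["signals"])
    )
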
@@ -15,6 +15,7 @@ networks:

volumes:
  valkey_data:
  embedding_models:
  paddleocr_models:

services:

@@ -141,6 +142,74 @@ services:
    networks:
      - breakpilot-network

  # =========================================================
  # OCR SERVICE (PaddleOCR PP-OCRv5)
  # =========================================================
  paddleocr-service:
    build:
      context: ./paddleocr-service
      dockerfile: Dockerfile
    container_name: bp-core-paddleocr
    expose:
      - "8095"
    environment:
      PADDLEOCR_API_KEY: ${PADDLEOCR_API_KEY:-}
      FLAGS_use_mkldnn: "0"
    volumes:
      - paddleocr_models:/root/.paddleocr
    labels:
      - "traefik.http.services.paddleocr.loadbalancer.server.port=8095"
    deploy:
      resources:
        limits:
          memory: 6G
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:8095/health"]
      interval: 30s
      timeout: 10s
      start_period: 300s
      retries: 5
    restart: unless-stopped
    networks:
      - breakpilot-network

  # =========================================================
  # PITCH DECK
  # =========================================================
  pitch-deck:
    build:
      context: ./pitch-deck
      dockerfile: Dockerfile
    container_name: bp-core-pitch-deck
    expose:
      - "3000"
    environment:
      DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT:-5432}/${POSTGRES_DB}
      PITCH_JWT_SECRET: ${PITCH_JWT_SECRET}
      PITCH_ADMIN_SECRET: ${PITCH_ADMIN_SECRET}
      PITCH_BASE_URL: ${PITCH_BASE_URL:-https://pitch.breakpilot.ai}
      MAGIC_LINK_TTL_HOURS: ${MAGIC_LINK_TTL_HOURS:-72}
      # Optional: bootstrap first admin via `npm run admin:create` inside the container.
      PITCH_ADMIN_BOOTSTRAP_EMAIL: ${PITCH_ADMIN_BOOTSTRAP_EMAIL:-}
      PITCH_ADMIN_BOOTSTRAP_NAME: ${PITCH_ADMIN_BOOTSTRAP_NAME:-}
      PITCH_ADMIN_BOOTSTRAP_PASSWORD: ${PITCH_ADMIN_BOOTSTRAP_PASSWORD:-}
      SMTP_HOST: ${SMTP_HOST}
      SMTP_PORT: ${SMTP_PORT:-587}
      SMTP_USERNAME: ${SMTP_USERNAME}
      SMTP_PASSWORD: ${SMTP_PASSWORD}
      SMTP_FROM_NAME: ${SMTP_FROM_NAME:-BreakPilot}
      SMTP_FROM_ADDR: ${SMTP_FROM_ADDR:-noreply@breakpilot.ai}
      NODE_ENV: production
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:3000/api/health"]
      interval: 30s
      timeout: 10s
      start_period: 15s
      retries: 3
    restart: unless-stopped
    networks:
      - breakpilot-network

  # =========================================================
  # HEALTH AGGREGATOR
  # =========================================================
@@ -153,7 +222,7 @@ services:
      - "8099"
    environment:
      PORT: 8099
      CHECK_SERVICES: "valkey:6379,consent-service:8081,rag-service:8097,embedding-service:8087"
      CHECK_SERVICES: "valkey:6379,consent-service:8081,rag-service:8097,embedding-service:8087,paddleocr-service:8095,pitch-deck:3000"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:8099/health"]
      interval: 30s
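
The aggregator consumes `CHECK_SERVICES` as a comma-separated list of `host:port` pairs; the hunk above appends the two new services. The aggregator's source is not part of this diff, but a minimal sketch of how such a list might be parsed and probed over plain TCP (hypothetical, function name invented):

```python
import socket

def check_services(spec: str, timeout: float = 2.0) -> dict:
    """Map each "host:port" entry in spec to True/False reachability."""
    results = {}
    for entry in filter(None, (e.strip() for e in spec.split(","))):
        host, _, port = entry.rpartition(":")
        try:
            with socket.create_connection((host, int(port)), timeout=timeout):
                results[entry] = True
        except OSError:
            results[entry] = False
    return results
```
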
@@ -56,10 +56,12 @@ services:
      - "8091:8091"   # Voice Service (WSS)
      - "8093:8093"   # AI Compliance SDK
      - "8097:8097"   # RAG Service (NEW)
      #- "8098:8098"  # Control Pipeline (internal only, no nginx port needed)
      - "8443:8443"   # Jitsi Meet
      - "3008:3008"   # Admin Core
      - "3010:3010"   # Portal Dashboard
      - "8011:8011"   # Compliance Docs (MkDocs)
      - "3012:3012"   # Pitch Deck
    volumes:
      - ./nginx/conf.d:/etc/nginx/conf.d:ro
      - vault_certs:/etc/nginx/certs:ro
@@ -376,6 +378,50 @@ services:
    networks:
      - breakpilot-network

  # =========================================================
  # CONTROL PIPELINE (developer-only, not customer-facing)
  # =========================================================
  control-pipeline:
    build:
      context: ./control-pipeline
      dockerfile: Dockerfile
    container_name: bp-core-control-pipeline
    platform: linux/arm64
    expose:
      - "8098"
    environment:
      PORT: 8098
      DATABASE_URL: postgresql://${POSTGRES_USER:-breakpilot}:${POSTGRES_PASSWORD:-breakpilot123}@postgres:5432/${POSTGRES_DB:-breakpilot_db}
      SCHEMA_SEARCH_PATH: compliance,core,public
      QDRANT_URL: http://qdrant:6333
      EMBEDDING_SERVICE_URL: http://embedding-service:8087
      ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
      CONTROL_GEN_ANTHROPIC_MODEL: ${CONTROL_GEN_ANTHROPIC_MODEL:-claude-sonnet-4-6}
      DECOMPOSITION_LLM_MODEL: ${DECOMPOSITION_LLM_MODEL:-claude-haiku-4-5-20251001}
      OLLAMA_URL: ${OLLAMA_URL:-http://host.docker.internal:11434}
      CONTROL_GEN_OLLAMA_MODEL: ${CONTROL_GEN_OLLAMA_MODEL:-qwen3.5:35b-a3b}
      SDK_URL: http://ai-compliance-sdk:8090
      JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-key-change-in-production}
      ENVIRONMENT: ${ENVIRONMENT:-development}
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      postgres:
        condition: service_healthy
      qdrant:
        condition: service_healthy
      embedding-service:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:8098/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    restart: unless-stopped
    networks:
      - breakpilot-network

  embedding-service:
    build:
      context: ./embedding-service
@@ -828,13 +874,17 @@ services:
      dockerfile: Dockerfile
    container_name: bp-core-pitch-deck
    platform: linux/arm64
    ports:
      - "3012:3000"
    expose:
      - "3000"
    environment:
      NODE_ENV: production
      DATABASE_URL: postgres://${POSTGRES_USER:-breakpilot}:${POSTGRES_PASSWORD:-breakpilot123}@postgres:5432/${POSTGRES_DB:-breakpilot_db}
      OLLAMA_URL: ${OLLAMA_URL:-http://host.docker.internal:11434}
      OLLAMA_MODEL: ${OLLAMA_MODEL:-qwen3.5:35b-a3b}
      PITCH_JWT_SECRET: ${PITCH_JWT_SECRET:-7025f5da6d2ea384353ea6debddae0ea9e2dbca151a1df4b65be8cb80a5cf002}
      PITCH_ADMIN_SECRET: ${PITCH_ADMIN_SECRET:-40df9e6f2ca2e90729030af37bf79199710b09c898cac9df}
      LITELLM_URL: ${LITELLM_URL:-https://llm-dev.meghsakha.com}
      LITELLM_MODEL: ${LITELLM_MODEL:-gpt-oss-120b}
      LITELLM_API_KEY: ${LITELLM_API_KEY:-sk-0nAyxaMVbIqmz_ntnndzag}
      TTS_SERVICE_URL: http://bp-compliance-tts:8095
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:

@@ -1,194 +1,77 @@
# Environment Architecture

## Overview

BreakPilot uses a 3-environment strategy for safe development and deployment:
BreakPilot uses two environments:

```
┌─────────────────┐     ┌─────────────────┐     ┌─────────────────┐
│   Development   │────▶│     Staging     │────▶│   Production    │
│    (develop)    │     │    (staging)    │     │     (main)      │
└─────────────────┘     └─────────────────┘     └─────────────────┘
  Daily                   Tested code             Production-ready
  development
┌─────────────────┐                      ┌─────────────────┐
│   Development   │───── git push ────▶ │   Production    │
│   (Mac Mini)    │                      │     (Orca)      │
└─────────────────┘                      └─────────────────┘
  Local                                    Automatic
  development                              via Orca
```

## Environments

### Development (Dev)
### Development (local, Mac Mini)

**Purpose:** Day-to-day development work
**Purpose:** Local development and testing

| Property | Value |
|----------|-------|
| Git branch | `develop` |
| Compose file | `docker-compose.yml` + `docker-compose.override.yml` (auto) |
| Env file | `.env.dev` |
| Database | `breakpilot_dev` |
| Git branch | `main` |
| Compose file | `docker-compose.yml` |
| Database | local PostgreSQL |
| Debug | enabled |
| Hot reload | enabled |

**Start:**
```bash
./scripts/start.sh dev
# or simply:
docker compose up -d
ssh macmini "cd ~/Projekte/breakpilot-core && /usr/local/bin/docker compose up -d"
```

### Staging
### Production (Orca)

**Purpose:** Tested, approved code ahead of production

| Property | Value |
|----------|-------|
| Git branch | `staging` |
| Compose file | `docker-compose.yml` + `docker-compose.staging.yml` |
| Env file | `.env.staging` |
| Database | `breakpilot_staging` (separate volume) |
| Debug | disabled |
| Hot reload | disabled |

**Start:**
```bash
./scripts/start.sh staging
# or:
docker compose -f docker-compose.yml -f docker-compose.staging.yml up -d
```

### Production (Prod)

**Purpose:** Live system for end users (from launch)
**Purpose:** Live system

| Property | Value |
|----------|-------|
| Git branch | `main` |
| Compose file | `docker-compose.yml` + `docker-compose.prod.yml` |
| Env file | `.env.prod` (NOT in the repository!) |
| Database | `breakpilot_prod` (separate volume) |
| Deployment | Orca (automatic on push to gitea) |
| Database | external PostgreSQL (TLS) |
| Debug | disabled |
| Vault | mandatory (no env fallbacks) |

## Database Separation

Each environment uses separate Docker volumes for complete data isolation:

```
┌─────────────────────────────────────────────────────────────┐
│                    PostgreSQL Volumes                       │
├─────────────────────────────────────────────────────────────┤
│ breakpilot-dev_postgres_data  │ Development database        │
│ breakpilot_staging_postgres   │ Staging database            │
│ breakpilot_prod_postgres      │ Production database         │
└─────────────────────────────────────────────────────────────┘
```

## Port Mapping

So that several environments can run at the same time, they use different ports:

| Service | Dev port | Staging port | Prod port |
|---------|----------|--------------|-----------|
| Backend | 8000 | 8001 | 8000 |
| PostgreSQL | 5432 | 5433 | - (internal) |
| MinIO | 9000/9001 | 9002/9003 | - (internal) |
| Qdrant | 6333/6334 | 6335/6336 | - (internal) |
| Mailpit | 8025/1025 | 8026/1026 | - (disabled) |

## Git Branching Strategy

```
main (Prod)     ← release merges only, protected
    │
    ▼
staging         ← tested code, review required
    │
    ▼
develop (Dev)   ← day-to-day work, default branch
    │
    ▼
feature/*       ← feature branches (optional)
```

### Workflow

1. **Development:** work on `develop`
2. **Code review:** open a PR from a feature branch → `develop`
3. **Staging:** promote `develop` → `staging` with tests
4. **Release:** promote `staging` → `main` after sign-off

### Promotion Commands

**Deploy:**
```bash
# develop → staging
./scripts/promote.sh dev-to-staging

# staging → main (production)
./scripts/promote.sh staging-to-prod
git push origin main && git push gitea main
# Orca builds and deploys automatically
```

## Secrets Management

### Development
- `.env.dev` contains development credentials
- Vault optional (dev token)
- Mailpit for e-mail testing

### Staging
- `.env.staging` contains test credentials
- Vault recommended
- Mailpit for e-mail safety

### Production
- `.env.prod` NOT in the repository
- Vault MANDATORY
- real SMTP configuration

See also: [Secrets Management](./secrets-management.md)

## Docker Compose Architecture

```
docker-compose.yml                   ← base configuration
docker-compose.yml                   ← base configuration (local, arm64)
    │
    ├── docker-compose.override.yml  ← Dev (auto-loaded)
    │
    ├── docker-compose.staging.yml   ← Staging (explicit)
    │
    └── docker-compose.prod.yml      ← Production (explicit)
    └── docker-compose.orca.yml      ← production override (amd64)
```

### Automatic Loading
Orca automatically uses both compose files for the production build.

Docker Compose automatically loads:
1. `docker-compose.yml`
2. `docker-compose.override.yml` (if present)
## Secrets Management

`docker compose up` therefore starts the dev environment by default.
### Development
- `.env` contains development credentials
- Vault optional (dev token)
- Mailpit for e-mail testing

## Helper Scripts
### Production
- `.env` on the server (not in the repository)
- Vault MANDATORY
- real SMTP configuration

| Script | Description |
|--------|-------------|
| `scripts/env-switch.sh` | switches between environments |
| `scripts/start.sh` | starts the services for an environment |
| `scripts/stop.sh` | stops services |
| `scripts/promote.sh` | promotes code between branches |
| `scripts/status.sh` | shows the current status |

## Verification

Check after setup:

```bash
# show status
./scripts/status.sh

# check branches
git branch -v

# check volumes
docker volume ls | grep breakpilot
```
See also: [Secrets Management](./secrets-management.md)

## Related Documentation

@@ -1,15 +1,14 @@
# CI/CD Pipeline

Overview of the deployment process for BreakPilot.

## Overview

| Component | Build tool | Deployment |
|-----------|------------|------------|
| Frontend (Next.js) | Docker | Mac Mini |
| Backend (FastAPI) | Docker | Mac Mini |
| Go services | Docker (multi-stage) | Mac Mini |
| Documentation | MkDocs | Docker (Nginx) |
| Repo | Deployment | Trigger | Compose file |
|------|-----------|---------|--------------|
| **breakpilot-core** | Orca (automatic) | push to `orca` branch | `docker-compose.orca.yml` |
| **breakpilot-compliance** | Orca (automatic) | push to `main` branch | `docker-compose.yml` + `docker-compose.orca.yml` |
| **breakpilot-lehrer** | Mac Mini (local) | manual `docker compose` | `docker-compose.yml` |

## Deployment Architecture

@@ -17,287 +16,146 @@
```
┌─────────────────────────────────────────────────────────────────┐
│                       Developer MacBook                         │
│                                                                 │
│  breakpilot-core/                                               │
│  ├── admin-core/        (Next.js Admin, Port 3008)              │
│  ├── backend-core/      (Python FastAPI, Port 8000)             │
│  ├── consent-service/   (Go Service, Port 8081)                 │
│  ├── billing-service/   (Go Service, Port 8083)                 │
│  └── docs-src/          (MkDocs)                                │
│  breakpilot-core/        → git push gitea orca                  │
│  breakpilot-compliance/  → git push gitea main                  │
│  breakpilot-lehrer/      → git push + ssh macmini docker ...    │
│                                                                 │
│  git push → Gitea Actions (automatic)                           │
│  or manually: git push && ssh macmini docker compose build      │
└───────────────────────────────┬─────────────────────────────────┘
                                │
                                │ git push origin main
                                │
                                ▼
┌─────────────────────────────────────────────────────────────────┐
│                   Mac Mini Server (bp-core-*)                   │
│                                                                 │
│  Docker Compose                                                 │
│  ├── admin-core        (Port 3008)                              │
│  ├── backend-core      (Port 8000)                              │
│  ├── consent-service   (Port 8081)                              │
│  ├── billing-service   (Port 8083)                              │
│  ├── gitea (Port 3003) + gitea-runner (Gitea Actions)           │
│  ├── docs (Port 8011)                                           │
│  ├── postgres, valkey, qdrant, minio                            │
│  └── vault, nginx, night-scheduler, health                      │
│                                                                 │
└─────────────────────────────────────────────────────────────────┘
                    ┌───────────┴───────────┐
                    │                       │
                    ▼                       ▼
┌───────────────────────────┐   ┌───────────────────────────┐
│   Orca (Production)       │   │   Mac Mini (local/dev)    │
│                           │   │                           │
│   Gitea Actions           │   │   breakpilot-lehrer       │
│   ├── Tests               │   │   ├── studio-v2           │
│   └── Orca API Deploy     │   │   ├── klausur-service     │
│                           │   │   ├── backend-lehrer      │
│   Core services:          │   │   └── voice-service       │
│   ├── consent-service     │   │                           │
│   ├── rag-service         │   │   Core services (local):  │
│   ├── embedding-service   │   │   ├── postgres            │
│   ├── paddleocr-service   │   │   ├── valkey, vault       │
│   └── health-aggregator   │   │   ├── nginx, gitea        │
│                           │   │   └── ...                 │
│   Compliance services:    │   │                           │
│   ├── admin-compliance    │   │                           │
│   ├── backend-compliance  │   │                           │
│   ├── ai-compliance-sdk   │   │                           │
│   └── developer-portal    │   │                           │
└───────────────────────────┘   └───────────────────────────┘
```

## Sync & Deploy Workflow
## breakpilot-core → Orca

### 1. Sync files

```bash
# sync all relevant directories to the Mac Mini
rsync -avz --delete \
  --exclude 'node_modules' \
  --exclude '.next' \
  --exclude '.git' \
  --exclude '__pycache__' \
  --exclude 'venv' \
  --exclude '.pytest_cache' \
  /Users/benjaminadmin/Projekte/breakpilot-core/ \
  macmini:/Users/benjaminadmin/Projekte/breakpilot-core/
```

### 2. Build containers

```bash
# build a single service
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  build --no-cache <service-name>"

# examples:
# studio-v2, admin-v2, website, backend, klausur-service, docs
```

### 3. Deploy containers

```bash
# restart the container
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  up -d <service-name>"
```

### 4. Check logs

```bash
# show container logs
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  logs -f <service-name>"
```

## Service-Specific Deployments

### Next.js frontend (studio-v2, admin-v2, website)

```bash
# 1. sync
rsync -avz --delete \
  --exclude 'node_modules' --exclude '.next' --exclude '.git' \
  /Users/benjaminadmin/Projekte/breakpilot-core/studio-v2/ \
  macmini:/Users/benjaminadmin/Projekte/breakpilot-core/studio-v2/

# 2. build & deploy
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  build --no-cache studio-v2 && \
  /usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  up -d studio-v2"
```

### Python services (backend, klausur-service, voice-service)

```bash
# build with requirements.txt
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  build klausur-service && \
  /usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  up -d klausur-service"
```

### Go services (consent-service, ai-compliance-sdk)

```bash
# multi-stage build (Go → Alpine)
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  build --no-cache consent-service && \
  /usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  up -d consent-service"
```

### MkDocs documentation

```bash
# build & deploy
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  build --no-cache docs && \
  /usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  up -d docs"

# available at: http://macmini:8009
```

## Health Checks

### Check service status

```bash
# status of all containers
ssh macmini "docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'"

# probe the health endpoints
curl -s http://macmini:8000/health
curl -s http://macmini:8081/health
curl -s http://macmini:8086/health
curl -s http://macmini:8090/health
```

### Analyse logs

```bash
# last 100 lines
ssh macmini "docker logs --tail 100 breakpilot-core-backend-1"

# follow live logs
ssh macmini "docker logs -f breakpilot-core-backend-1"
```

## Rollback

### Roll a container back to the previous version

```bash
# 1. tag the current image
ssh macmini "docker tag breakpilot-core-backend:latest breakpilot-core-backend:backup"

# 2. deploy the old image
ssh macmini "/usr/local/bin/docker compose \
  -f /Users/benjaminadmin/Projekte/breakpilot-core/docker-compose.yml \
  up -d backend"

# 3. if problems arise: restore the backup
ssh macmini "docker tag breakpilot-core-backend:backup breakpilot-core-backend:latest"
```

## Troubleshooting

### Container does not start

```bash
# 1. check the logs
ssh macmini "docker logs breakpilot-core-<service>-1"

# 2. run the container manually for debug output
ssh macmini "docker compose -f .../docker-compose.yml run --rm <service>"

# 3. log into the container
ssh macmini "docker exec -it breakpilot-core-<service>-1 /bin/sh"
```

### Port already in use

```bash
# check port usage
ssh macmini "lsof -i :8000"

# find the container using the port
ssh macmini "docker ps --filter publish=8000"
```

### Build errors

```bash
# clear the build cache completely
ssh macmini "docker builder prune -a"

# build without cache
ssh macmini "docker compose build --no-cache <service>"
```

## Monitoring

### Resource usage

```bash
# CPU/memory of all containers
ssh macmini "docker stats --no-stream"

# disk usage
ssh macmini "docker system df"
```

### Cleanup

```bash
# remove unused images/containers
ssh macmini "docker system prune -a --volumes"

# dangling images only
ssh macmini "docker image prune"
```

## Environment Variables

Environment variables are managed via `.env` files and docker-compose.yml:
### Pipeline

```yaml
# docker-compose.yml
services:
  backend:
    environment:
      - DATABASE_URL=postgresql://...
      - REDIS_URL=redis://valkey:6379
      - SECRET_KEY=${SECRET_KEY}
# .gitea/workflows/deploy-orca.yml
on:
  push:
    branches: [orca]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Deploy via Orca API
        # triggers the Orca build + deploy via its API
        # secrets: ORCA_API_TOKEN, ORCA_RESOURCE_UUID, ORCA_BASE_URL
```

**Important**: never commit sensitive values to Git. Instead:
- maintain the `.env` file on the server
- secrets via HashiCorp Vault (see below)
### Workflow

```bash
# 1. edit the code on the MacBook
# 2. commit and push:
git push origin main && git push gitea main

# 3. for a production deploy:
git push gitea orca

# 4. check the status:
# https://gitea.meghsakha.com/Benjamin_Boenisch/breakpilot-core/actions
```

### Orca-deployed services

| Service | Container | Description |
|---------|-----------|-------------|
| valkey | bp-core-valkey | session cache |
| consent-service | bp-core-consent-service | consent management (Go) |
| rag-service | bp-core-rag-service | semantic search |
| embedding-service | bp-core-embedding-service | text embeddings |
| paddleocr-service | bp-core-paddleocr | OCR engine (x86_64) |
| health-aggregator | bp-core-health | health-check aggregator |

## breakpilot-compliance → Orca

### Pipeline

```yaml
# .gitea/workflows/ci.yaml
on:
  push:
    branches: [main, develop]

jobs:
  # lint (PRs only)
  # tests (Go, Python, Node.js)
  # validate canonical controls
  # deploy (main only, after all tests)
```

### Workflow

```bash
# commit and push → Orca deploys automatically:
git push origin main && git push gitea main

# check CI status:
# https://gitea.meghsakha.com/Benjamin_Boenisch/breakpilot-compliance/actions

# health checks:
curl -sf https://api-dev.breakpilot.ai/health
curl -sf https://sdk-dev.breakpilot.ai/health
```

## breakpilot-lehrer → Mac Mini (local)

### Workflow

```bash
# 1. edit the code on the MacBook
# 2. commit and push:
git push origin main && git push gitea main

# 3. pull on the Mac Mini and rebuild the containers:
ssh macmini "git -C /Users/benjaminadmin/Projekte/breakpilot-lehrer pull --no-rebase origin main"
ssh macmini "/usr/local/bin/docker compose -f /Users/benjaminadmin/Projekte/breakpilot-lehrer/docker-compose.yml build --no-cache <service>"
ssh macmini "/usr/local/bin/docker compose -f /Users/benjaminadmin/Projekte/breakpilot-lehrer/docker-compose.yml up -d <service>"
```

## Gitea Actions

### Overview

BreakPilot Core uses **Gitea Actions** (GitHub Actions-compatible) as its CI/CD system. The `act_runner` runs as a container on the Mac Mini and executes pipelines directly on every code push.
BreakPilot uses **Gitea Actions** (GitHub Actions-compatible) as its CI/CD system. The `act_runner` runs as a container on the Mac Mini and executes pipelines.

| Component | Container | Description |
|-----------|-----------|-------------|
| Gitea | `bp-core-gitea` (Port 3003) | Git server + Actions trigger |
| Gitea Runner | `bp-core-gitea-runner` | runs the Actions workflows |

### Pipeline Configuration

Workflows live in the repo under `.gitea/workflows/`:
Workflows live in each repo under `.gitea/workflows/`:

```yaml
# .gitea/workflows/main.yml
on:
  push:
    branches: [main]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build & Test
        run: docker compose build
```
| Repo | Workflow | Branch | Action |
|------|----------|--------|--------|
| breakpilot-core | `deploy-orca.yml` | `orca` | Orca API deploy |
| breakpilot-compliance | `ci.yaml` | `main` | tests + Orca deploy |

### Renew the Runner Token

@@ -314,12 +172,79 @@ ssh macmini "/usr/local/bin/docker compose \
  up -d --force-recreate gitea-runner"
```

### Check pipeline status

```bash
# runner logs
ssh macmini "/usr/local/bin/docker logs -f bp-core-gitea-runner"

# running jobs
ssh macmini "/usr/local/bin/docker exec bp-core-gitea-runner act_runner list"
```

## Health Checks

### Production (Orca)

```bash
# Core PaddleOCR
curl -sf https://ocr.breakpilot.com/health

# Compliance
curl -sf https://api-dev.breakpilot.ai/health
curl -sf https://sdk-dev.breakpilot.ai/health
```

### Local (Mac Mini)

```bash
# Core health aggregator
curl -sf http://macmini:8099/health

# Lehrer backend
curl -sf https://macmini:8001/health

# Klausur-Service
curl -sf https://macmini:8086/health
```

## Troubleshooting

### Container does not start

```bash
# check the logs (local)
ssh macmini "/usr/local/bin/docker logs bp-core-<service>"

# log into the container
ssh macmini "/usr/local/bin/docker exec -it bp-core-<service> /bin/sh"
```

### Build errors

```bash
# clear the build cache completely
ssh macmini "docker builder prune -a"

# build without cache
ssh macmini "docker compose build --no-cache <service>"
```

## Rollback

### Orca

A redeploy of an older commit can be triggered by resetting the branch:

```bash
# reset the branch to the previous commit and push
git reset --hard <previous-commit>
git push gitea orca --force
```

### Local (Mac Mini)

```bash
# tag the image as a backup
ssh macmini "docker tag breakpilot-lehrer-klausur-service:latest breakpilot-lehrer-klausur-service:backup"

# if problems arise: restore the backup
ssh macmini "docker tag breakpilot-lehrer-klausur-service:backup breakpilot-lehrer-klausur-service:latest"
```

@@ -12,6 +12,14 @@ BreakPilot consists of three independent projects:
| **breakpilot-lehrer** | education stack (Team A) | `bp-lehrer-*` | blue |
| **breakpilot-compliance** | DSGVO/compliance stack (Team B) | `bp-compliance-*` | purple |

### Deployment Model

| Repo | Deployment | Trigger |
|------|-----------|---------|
| **breakpilot-core** | Orca (automatic) | push to gitea main |
| **breakpilot-compliance** | Orca (automatic) | push to gitea main |
| **breakpilot-lehrer** | Mac Mini (local) | manual docker compose |

## Core Services

| Service | Container | Port | Description |
@@ -30,32 +38,11 @@ BreakPilot consists of three independent projects:
| Admin Core | bp-core-admin | 3008 | admin dashboard (Next.js) |
| Health Aggregator | bp-core-health | 8099 | service-health monitoring |
| Night Scheduler | bp-core-night-scheduler | 8096 | nightly shutdown |
| Pitch Deck | bp-core-pitch-deck | 3012 | investor presentation |
| Mailpit | bp-core-mailpit | 8025 | e-mail (development) |
| Gitea | bp-core-gitea | 3003 | Git server |
| Gitea Runner | bp-core-gitea-runner | - | CI/CD (Gitea Actions) |
| Jitsi (5 containers) | bp-core-jitsi-* | 8443 | video conferencing |

## Nginx Routing Table

| Port | Upstream | Project |
|------|----------|---------|
| 443 | bp-lehrer-studio-v2:3001 | Lehrer |
| 3000 | bp-lehrer-website:3000 | Lehrer |
| 3002 | bp-lehrer-admin:3000 | Lehrer |
| 3006 | bp-compliance-developer-portal:3000 | Compliance |
| 3007 | bp-compliance-admin:3000 | Compliance |
| 3008 | bp-core-admin:3000 | Core |
| 8000 | bp-core-backend:8000 | Core |
| 8001 | bp-lehrer-backend:8001 | Lehrer |
| 8002 | bp-compliance-backend:8002 | Compliance |
| 8086 | bp-lehrer-klausur-service:8086 | Lehrer |
| 8087 | bp-core-embedding-service:8087 | Core |
| 8091 | bp-lehrer-voice-service:8091 | Lehrer |
| 8093 | bp-compliance-ai-sdk:8090 | Compliance |
| 8097 | bp-core-rag-service:8097 | Core |
| 8443 | bp-core-jitsi-web:80 | Core |

## Architecture

- [System Architecture](architecture/system-architecture.md)

document-templates/README.md (new file, +84 lines)
@@ -0,0 +1,84 @@
# Document Templates V2

Extended compliance templates (DSFA, TOM, VVT, AVV, BV, FRIA) for the BreakPilot Document Generator.

**Branch:** `feature/betriebsrat-compliance-module`
**Target integration:** breakpilot-compliance (once the refactoring is complete)
**Database:** `compliance.compliance_legal_templates` (shared PostgreSQL)

## Contents

### SQL Migrations (`migrations/`)

| File | Type | Description |
|------|------|-------------|
| `001_dsfa_template_v2.sql` | DSFA | threshold analysis (WP248), SDM TOMs, AI module, ~60 placeholders |
| `002_tom_sdm_template.sql` | TOM | 7 SDM protection goals, sector blocks, compliance assessment |
| `003_vvt_sector_templates.sql` | VVT | 6 sector blueprints (IT/SaaS, healthcare, retail, trades, education, consulting) |
| `004_avv_template.sql` | AVV | data processing agreement per Art. 28, 12 sections, TOM annex |
| `005_additional_templates.sql` | misc. | confidentiality undertaking + Art. 13/14 information duties |
| `006_betriebsvereinbarung_template.sql` | BV | works council agreement per Sec. 87 BetrVG, 13 sections (A-M), AI/IT systems |
| `007_fria_template.sql` | FRIA | fundamental rights impact assessment per Art. 27 AI Act, 8 sections |

### Python Generators (`generators/`)

| File | Description |
|------|-------------|
| `dsfa_template.py` | DSFA generator with threshold analysis, federal-state mapping, SDM TOMs, Art. 36, domain risks (HR/Edu/HC/Finance) |
| `tom_template.py` | TOM generator with SDM structure, NIS2/ISO27001/AI Act extensions, sectors |
| `vvt_template.py` | VVT generator with 6 sector catalogues, Art. 30 validation |
| `betriebsvereinbarung_template.py` | BV generator with TOM pre-filling, conflict-score-based protection clauses |
| `fria_template.py` | FRIA generator with domain→fundamental-rights mapping (6 domains), risk matrix |

### Scripts (`scripts/`)

| File | Description |
|------|-------------|
| `cleanup_temp_vorlagen.py` | deletes temporary DPA templates from Qdrant (`temp_vorlagen=true`) |

## Integration into breakpilot-compliance

### 1. Run the SQL migrations

```bash
# run the migrations against the shared DB
# on the Mac Mini:
ssh macmini "docker exec bp-core-postgres psql -U breakpilot -d breakpilot_db -f -" < migrations/001_dsfa_template_v2.sql
ssh macmini "docker exec bp-core-postgres psql -U breakpilot -d breakpilot_db -f -" < migrations/002_tom_sdm_template.sql
# ... and so on.
```

### 2. Copy the Python generators (on compliance integration)

```bash
cp generators/*.py /path/to/breakpilot-compliance/backend-compliance/compliance/api/document_templates/
```

### 3. Register the new document_types

In `breakpilot-compliance/backend-compliance/compliance/api/legal_template_routes.py`,
extend `VALID_DOCUMENT_TYPES` with:
- `verpflichtungserklaerung`
- `informationspflichten`

### 4. Run the Qdrant cleanup

```bash
# preview
ssh macmini "python3 /path/to/cleanup_temp_vorlagen.py --dry-run"

# execute
ssh macmini "python3 /path/to/cleanup_temp_vorlagen.py"
```

## Template Syntax

The rules below are illustrated by the sketch that follows this list.

- `{{PLACEHOLDER}}` — replaced with the context value
- `{{#IF FELD}}...{{/IF}}` — conditional block (only rendered when the field is set)
- `{{#IF_NOT FELD}}...{{/IF_NOT}}` — inverted conditional block
- `[BLOCK:ID]...[/BLOCK:ID]` — block that the rule engine can remove
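
The rendering engine itself is not included in this diff; a minimal sketch of the three substitution rules above (hypothetical, regex-based; the real generator may differ):

```python
import re

def render(template: str, ctx: dict) -> str:
    """Apply {{#IF}}/{{#IF_NOT}} blocks, then plain placeholders."""
    def _if(match):
        # Keep the body only when the field is truthy in the context.
        return match.group(2) if ctx.get(match.group(1)) else ""

    def _if_not(match):
        # Keep the body only when the field is absent or falsy.
        return match.group(2) if not ctx.get(match.group(1)) else ""

    template = re.sub(r"\{\{#IF (\w+)\}\}(.*?)\{\{/IF\}\}", _if, template, flags=re.S)
    template = re.sub(r"\{\{#IF_NOT (\w+)\}\}(.*?)\{\{/IF_NOT\}\}", _if_not, template, flags=re.S)
    # Plain placeholders: {{NAME}} -> ctx["NAME"]; unknown names stay as-is.
    return re.sub(r"\{\{(\w+)\}\}",
                  lambda m: str(ctx.get(m.group(1), m.group(0))), template)
```

`[BLOCK:ID]...[/BLOCK:ID]` markers are deliberately left untouched here, since the README assigns their removal to the rule engine rather than the renderer.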

## License

All templates: MIT License, BreakPilot Compliance.
No text taken from DPA documents; all wording is original.

document-templates/generators/betriebsvereinbarung_template.py (new file, +214 lines)
@@ -0,0 +1,214 @@
"""Betriebsvereinbarung template generator — creates BV draft from UCCA assessment.
|
||||
|
||||
Generates a modular works council agreement (Betriebsvereinbarung) based on:
|
||||
- UCCA Assessment result (triggered rules, risk score, obligations)
|
||||
- Company profile (name, location, works council)
|
||||
- System details (name, type, modules)
|
||||
|
||||
Sections A-M follow the template in migration 006.
|
||||
"""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
# -- Default verbotene Nutzungen nach BAG-Rechtsprechung --------------------
|
||||
|
||||
DEFAULT_VERBOTENE_NUTZUNGEN = [
|
||||
"Verdeckte Leistungs- oder Verhaltenskontrolle einzelner Beschaeftigter",
|
||||
"Erstellung individueller Persoenlichkeitsprofile oder Verhaltensanalysen",
|
||||
"Nutzung von Nutzungshistorien zu disziplinarischen Zwecken",
|
||||
"Automatisierte Personalentscheidungen ohne menschliche Ueberpruefung (Art. 22 DSGVO)",
|
||||
"Personenbezogene Rankings oder Leistungsvergleiche ohne gesonderte Mitbestimmung",
|
||||
"Korrelation von Systemnutzungsdaten mit Leistungsbeurteilungen",
|
||||
]
|
||||
|
||||
AI_VERBOTENE_NUTZUNGEN = [
|
||||
"Einsatz von KI-Funktionen zur biometrischen Echtzeit-Identifizierung am Arbeitsplatz",
|
||||
"KI-gestuetztes Social Scoring von Beschaeftigten",
|
||||
"Nutzung von KI-generierten Bewertungen als alleinige Grundlage fuer Personalentscheidungen",
|
||||
]
|
||||
|
||||
# -- Standard-TOM Massnahmen ------------------------------------------------
|
||||
|
||||
DEFAULT_TOM = [
|
||||
"Rollen- und Rechtekonzept mit Least-Privilege-Prinzip",
|
||||
"Verschluesselung der Daten bei Uebertragung (TLS 1.2+) und Speicherung (AES-256)",
|
||||
"Protokollierung aller administrativen Zugriffe",
|
||||
"Pseudonymisierung personenbezogener Daten, wo technisch moeglich",
|
||||
"Deaktivierung nicht benoetigter Telemetrie- und Diagnosefunktionen",
|
||||
"Getrennte Umgebungen fuer Test und Produktion",
|
||||
"Regelmaessige Sicherheitsupdates und Patch-Management",
|
||||
"Zugangsschutz durch Multi-Faktor-Authentifizierung fuer Administratoren",
|
||||
]
|
||||
|
||||
# -- Standard erlaubte Reports ----------------------------------------------
|
||||
|
||||
DEFAULT_ERLAUBTE_REPORTS = [
|
||||
"Systemgesundheit und Verfuegbarkeit (ohne Personenbezug)",
|
||||
"Lizenznutzung auf aggregierter Ebene (Abteilung/Standort, nicht Person)",
|
||||
"Sicherheitsereignisse und Anomalien",
|
||||
"Speicherplatznutzung (ohne Personenbezug)",
|
||||
"Fehlerstatistiken (technisch, nicht personenbezogen)",
|
||||
]
|
||||
|
||||
# -- Standard Datenarten bei IT/KI-Systemen ---------------------------------
|
||||
|
||||
DATENARTEN_MAP = {
|
||||
"email": "E-Mail-Metadaten (Absender, Empfaenger, Zeitstempel — NICHT Inhalte)",
|
||||
"chat": "Chat-/Messaging-Metadaten (Teilnehmer, Zeitstempel)",
|
||||
"document": "Dokumentenmetadaten (Ersteller, Aenderungsdatum, Dateiname)",
|
||||
"login": "Anmeldedaten (Benutzername, Zeitstempel, IP-Adresse)",
|
||||
"usage": "Nutzungsdaten (aufgerufene Funktionen, Nutzungsdauer — aggregiert)",
|
||||
"prompt": "KI-Eingaben und -Ausgaben (Prompts, Antworten)",
|
||||
"calendar": "Kalendereintraege (Betreff, Teilnehmer, Zeiten)",
|
||||
"hr": "Personalstammdaten (Name, Abteilung, Position, Eintrittsdatum)",
|
||||
"performance": "Leistungsdaten (Kennzahlen, Bewertungen, Zielvereinbarungen)",
|
||||
"video": "Videoaufnahmen (Arbeitsplatz, Zugangsbereiche)",
|
||||
"location": "Standortdaten (GPS, WLAN-basierte Ortung, Gebaeudezutritt)",
|
||||
}
|
||||
|
||||
|
||||
def generate_betriebsvereinbarung_draft(ctx: dict) -> dict:
|
||||
"""Generate a Betriebsvereinbarung draft from company + assessment context.
|
||||
|
||||
Args:
|
||||
ctx: Dict with keys:
|
||||
Required:
|
||||
- company_name: str
|
||||
- system_name: str
|
||||
- system_description: str
|
||||
Optional:
|
||||
- company_address: str
|
||||
- employer_representative: str
|
||||
- works_council_chair: str
|
||||
- system_vendor: str
|
||||
- locations: list[str]
|
||||
- departments: list[str]
|
||||
- modules: list[str]
|
||||
- purposes: list[str]
|
||||
- data_types: list[str] — keys from DATENARTEN_MAP
|
||||
- is_ai_system: bool
|
||||
- has_employee_monitoring: bool
|
||||
- has_hr_features: bool
|
||||
- has_video: bool
|
||||
- dpo_name: str
|
||||
- dpo_contact: str
|
||||
- audit_interval: str — e.g. "12 Monate"
|
||||
- duration: str — e.g. "unbefristet"
|
||||
- notice_period: str — e.g. "3 Monate"
|
||||
- retention_audit_logs: str — e.g. "90 Tage"
|
||||
- retention_usage_data: str — e.g. "30 Tage"
|
||||
- retention_prompts: str — e.g. "deaktiviert"
|
||||
- additional_forbidden: list[str]
|
||||
- additional_tom: list[str]
|
||||
- additional_reports: list[str]
|
||||
- betrvg_conflict_score: int — 0-100
|
||||
|
||||
Returns:
|
||||
Dict with placeholder values ready for template substitution.
|
||||
"""
|
||||
result = {}
|
||||
|
||||
# Basic info
|
||||
result["UNTERNEHMEN_NAME"] = ctx.get("company_name", "{{UNTERNEHMEN_NAME}}")
|
||||
result["UNTERNEHMEN_SITZ"] = ctx.get("company_address", "{{UNTERNEHMEN_SITZ}}")
|
||||
result["ARBEITGEBER_VERTRETER"] = ctx.get("employer_representative", "{{ARBEITGEBER_VERTRETER}}")
|
||||
result["BETRIEBSRAT_VORSITZ"] = ctx.get("works_council_chair", "{{BETRIEBSRAT_VORSITZ}}")
|
||||
result["SYSTEM_NAME"] = ctx.get("system_name", "{{SYSTEM_NAME}}")
|
||||
result["SYSTEM_BESCHREIBUNG"] = ctx.get("system_description", "{{SYSTEM_BESCHREIBUNG}}")
|
||||
result["SYSTEM_HERSTELLER"] = ctx.get("system_vendor", "")
|
||||
result["DSB_NAME"] = ctx.get("dpo_name", "{{DSB_NAME}}")
|
||||
result["DSB_KONTAKT"] = ctx.get("dpo_contact", "{{DSB_KONTAKT}}")
|
||||
|
||||
# B. Geltungsbereich
|
||||
locations = ctx.get("locations", [])
|
||||
result["GELTUNGSBEREICH_STANDORTE"] = _bullet_list(locations) if locations else "Alle Standorte der {{UNTERNEHMEN_NAME}}"
|
||||
|
||||
departments = ctx.get("departments", [])
|
||||
result["GELTUNGSBEREICH_BEREICHE"] = _bullet_list(departments) if departments else "Alle Beschaeftigten"
|
||||
|
||||
modules = ctx.get("modules", [])
|
||||
result["GELTUNGSBEREICH_MODULE"] = _bullet_list(modules) if modules else "Alle Module und Dienste von {{SYSTEM_NAME}}"
|
||||
|
||||
# C. Zweck
|
||||
purposes = ctx.get("purposes", [])
|
||||
result["ZWECK_BESCHREIBUNG"] = _bullet_list(purposes) if purposes else "{{ZWECK_BESCHREIBUNG}}"
|
||||
|
||||
# C.2 Verbotene Nutzungen
|
||||
forbidden = list(DEFAULT_VERBOTENE_NUTZUNGEN)
|
||||
if ctx.get("is_ai_system"):
|
||||
forbidden.extend(AI_VERBOTENE_NUTZUNGEN)
|
||||
forbidden.extend(ctx.get("additional_forbidden", []))
|
||||
result["VERBOTENE_NUTZUNGEN"] = _bullet_list(forbidden)
|
||||
|
||||
# D. Datenarten
|
||||
data_type_keys = ctx.get("data_types", [])
|
||||
datenarten = []
|
||||
for key in data_type_keys:
|
||||
if key in DATENARTEN_MAP:
|
||||
datenarten.append(DATENARTEN_MAP[key])
|
||||
else:
|
||||
datenarten.append(key)
|
||||
result["DATENARTEN_LISTE"] = _bullet_list(datenarten) if datenarten else "{{DATENARTEN_LISTE}}"
|
||||
|
||||
# E. Rollen
|
||||
result["ROLLEN_ADMIN"] = ctx.get("roles_admin", "IT-Administration: Systemkonfiguration, Benutzerverwaltung, Sicherheitsupdates")
|
||||
result["ROLLEN_FUEHRUNGSKRAFT"] = ctx.get("roles_manager", "Fuehrungskraefte: Nur aggregierte, nicht-personenbezogene Reports")
|
||||
result["ROLLEN_REPORTING"] = ctx.get("roles_reporting", "Controlling/Reporting: Nur freigegebene Standardreports (siehe Abschnitt G)")
|
||||
|
||||
# F. Transparenz
|
||||
result["TRANSPARENZ_INFO"] = ctx.get("transparency_info",
|
||||
"Die Information erfolgt schriftlich und in einer Informationsveranstaltung vor Einfuehrung des Systems.")
|
||||
|
||||
# G. Reports
|
||||
reports = list(DEFAULT_ERLAUBTE_REPORTS)
|
||||
reports.extend(ctx.get("additional_reports", []))
|
||||
result["ERLAUBTE_REPORTS"] = _bullet_list(reports)
|
||||
|
||||
# H. Speicherfristen
|
||||
result["SPEICHERFRIST_AUDIT_LOGS"] = ctx.get("retention_audit_logs", "90 Tage")
|
||||
result["SPEICHERFRIST_NUTZUNGSDATEN"] = ctx.get("retention_usage_data", "30 Tage")
|
||||
result["SPEICHERFRIST_CHAT_PROMPTS"] = ctx.get("retention_prompts", "deaktiviert")
|
||||
|
||||
# I. TOM
|
||||
tom = list(DEFAULT_TOM)
|
||||
tom.extend(ctx.get("additional_tom", []))
|
||||
# Intensivere Schutzmassnahmen bei hohem Konflikt-Score
|
||||
conflict_score = ctx.get("betrvg_conflict_score", 0)
|
||||
if conflict_score >= 50:
|
||||
tom.append("Automatische Anomalie-Erkennung bei ungewoehnlichen Admin-Zugriffen")
|
||||
tom.append("Quartalsweise Datenschutz-Audit durch externen Prueer")
|
||||
if conflict_score >= 75:
|
||||
tom.append("Betriebsrat erhaelt Leserechte auf Audit-Log-Dashboard")
|
||||
tom.append("Jede Sonderauswertung wird dem Betriebsrat innerhalb von 24h gemeldet")
|
||||
result["TOM_MASSNAHMEN"] = _bullet_list(tom)
|
||||
|
||||
# J. Change-Management
|
||||
result["CHANGE_MANAGEMENT_PROZESS"] = ctx.get("change_process",
|
||||
"Die Arbeitgeberin informiert den Betriebsrat schriftlich ueber geplante Aenderungen "
|
||||
"mindestens 14 Kalendertage vor Umsetzung. Bei sicherheitskritischen Updates kann die "
|
||||
"Frist auf 3 Werktage verkuerzt werden.")
|
||||
|
||||
# K. Audit
|
||||
result["AUDIT_INTERVALL"] = ctx.get("audit_interval", "12 Monate")
|
||||
|
||||
# L. Beschwerde
|
||||
result["BESCHWERDE_ANSPRECHPARTNER"] = ctx.get("complaint_contacts",
|
||||
"- Direkter Vorgesetzter\n- Betriebsrat ({{BETRIEBSRAT_VORSITZ}})\n"
|
||||
"- Datenschutzbeauftragter ({{DSB_NAME}}, {{DSB_KONTAKT}})")
|
||||
|
||||
# M. Schluss
|
||||
result["LAUFZEIT"] = ctx.get("duration", "unbefristet")
|
||||
result["KUENDIGUNGSFRIST"] = ctx.get("notice_period", "3 Monate")
|
||||
result["DATUM_UNTERZEICHNUNG"] = ctx.get("signing_date", "{{DATUM_UNTERZEICHNUNG}}")
|
||||
|
||||
# Conditional flags
|
||||
result["AI_SYSTEM"] = ctx.get("is_ai_system", False)
|
||||
result["VIDEO_UEBERWACHUNG"] = ctx.get("has_video", False)
|
||||
result["HR_SYSTEM"] = ctx.get("has_hr_features", False)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _bullet_list(items: list) -> str:
|
||||
"""Format a list as markdown bullet points."""
|
||||
return "\n".join(f"- {item}" for item in items)
|
||||
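
For orientation, a hypothetical invocation of the generator above (all field values invented for illustration):

```python
draft = generate_betriebsvereinbarung_draft({
    "company_name": "Example GmbH",
    "system_name": "Microsoft 365 Copilot",
    "system_description": "KI-gestuetzte Buerosoftware",
    "data_types": ["email", "usage", "prompt"],
    "is_ai_system": True,          # adds the AI prohibitions
    "betrvg_conflict_score": 60,   # >= 50 adds the stronger TOM clauses
})
print(draft["VERBOTENE_NUTZUNGEN"])  # bullet list incl. the AI items
```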

document-templates/generators/dsfa_template.py (new file, +485 lines)
@@ -0,0 +1,485 @@
"""DSFA template generator V2 — creates DSFA skeleton from company profile.
|
||||
|
||||
Enhanced with:
|
||||
- Schwellwertanalyse (9 WP248 criteria)
|
||||
- Bundesland-specific Muss-Listen references
|
||||
- SDM-based TOM structure (7 Gewaehrleistungsziele)
|
||||
- Structured risk assessment (ISO 29134 methodology)
|
||||
- AI Act module (Section 8)
|
||||
- Art. 36 consultation assessment
|
||||
"""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
# -- WP248 Kriterien --------------------------------------------------------
|
||||
|
||||
WP248_CRITERIA = [
|
||||
{"id": "K1", "label": "Bewertung oder Scoring (einschl. Profiling und Prognose)",
|
||||
"ctx_keys": ["has_profiling", "has_scoring"]},
|
||||
{"id": "K2", "label": "Automatisierte Entscheidungsfindung mit Rechtswirkung",
|
||||
"ctx_keys": ["has_automated_decisions"]},
|
||||
{"id": "K3", "label": "Systematische Ueberwachung von Personen",
|
||||
"ctx_keys": ["has_surveillance", "has_employee_monitoring", "has_video_surveillance"]},
|
||||
{"id": "K4", "label": "Verarbeitung sensibler Daten (Art. 9/10 DS-GVO)",
|
||||
"ctx_keys": ["processes_health_data", "processes_biometric_data", "processes_criminal_data"]},
|
||||
{"id": "K5", "label": "Datenverarbeitung in grossem Umfang",
|
||||
"ctx_keys": ["large_scale_processing"]},
|
||||
{"id": "K6", "label": "Verknuepfung oder Zusammenfuehrung von Datenbestaenden",
|
||||
"ctx_keys": ["data_matching", "data_combining"]},
|
||||
{"id": "K7", "label": "Daten zu schutzbeduerftigen Betroffenen",
|
||||
"ctx_keys": ["processes_minors_data", "processes_employee_data", "processes_patient_data"]},
|
||||
{"id": "K8", "label": "Innovative Nutzung neuer technologischer Loesungen",
|
||||
"ctx_keys": ["uses_ai", "uses_biometrics", "uses_iot"]},
|
||||
{"id": "K9", "label": "Verarbeitung hindert Betroffene an Rechtsausuebung",
|
||||
"ctx_keys": ["blocks_service_access", "blocks_contract"]},
|
||||
]
|
||||
|
||||
# -- Bundesland -> Aufsichtsbehoerde Mapping --------------------------------
|
||||
|
||||
BUNDESLAND_AUFSICHT = {
|
||||
"baden-wuerttemberg": ("LfDI Baden-Wuerttemberg", "DSK Muss-Liste + BW-spezifische Liste (Art. 35 Abs. 4)"),
|
||||
"bayern": ("BayLDA (nicht-oeffentlicher Bereich)", "BayLDA Muss-Liste (17.10.2018) + Fallbeispiel ISO 29134"),
|
||||
"berlin": ("BlnBDI", "BlnBDI Muss-Liste nicht-oeffentlich / oeffentlich"),
|
||||
"brandenburg": ("LDA Brandenburg", "LDA BB Muss-Liste allgemein / oeffentlich"),
|
||||
"bremen": ("LfDI Bremen", "LfDI HB Muss-Liste"),
|
||||
"hamburg": ("HmbBfDI", "HmbBfDI Muss-Liste nicht-oeffentlich / oeffentlich"),
|
||||
"hessen": ("HBDI", "DSK Muss-Liste (HBDI uebernimmt DSK-Liste)"),
|
||||
"mecklenburg-vorpommern": ("LfDI M-V", "LfDI M-V Muss-Liste"),
|
||||
"niedersachsen": ("LfD Niedersachsen", "LfD NI Muss-Liste + Pruefschema"),
|
||||
"nordrhein-westfalen": ("LDI NRW", "LDI NRW Muss-Liste nicht-oeffentlich / oeffentlich"),
|
||||
"rheinland-pfalz": ("LfDI RLP", "LfDI RLP Muss-Liste allgemein / oeffentlich"),
|
||||
"saarland": ("UDS Saarland", "DSK Muss-Liste (UDS uebernimmt DSK-Liste)"),
|
||||
"sachsen": ("SDB Sachsen", "SDB Sachsen Muss-Liste"),
|
||||
"sachsen-anhalt": ("LfD Sachsen-Anhalt", "LfD SA Muss-Liste allgemein / oeffentlich"),
|
||||
"schleswig-holstein": ("ULD Schleswig-Holstein", "ULD Muss-Liste + Planspiel-DSFA"),
|
||||
"thueringen": ("TLfDI", "TLfDI Muss-Liste (04.07.2018)"),
|
||||
"bund": ("BfDI", "BfDI Muss-Liste / DSFA-Hinweise"),
|
||||
}
|
||||
|
||||
# -- SDM Gewaehrleistungsziele -----------------------------------------------
|
||||
|
||||
SDM_GOALS = [
|
||||
{
|
||||
"id": "verfuegbarkeit",
|
||||
"label": "Verfuegbarkeit",
|
||||
"description": "Personenbezogene Daten stehen zeitgerecht zur Verfuegung und koennen ordnungsgemaess verarbeitet werden.",
|
||||
"default_measures": [
|
||||
"Redundante Datenhaltung und regelmaessige Backups",
|
||||
"Disaster-Recovery-Plan mit definierten RTO/RPO-Werten",
|
||||
"USV und Notstromversorgung fuer kritische Systeme",
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": "integritaet",
|
||||
"label": "Integritaet",
|
||||
"description": "Personenbezogene Daten bleiben waehrend der Verarbeitung unversehrt, vollstaendig und aktuell.",
|
||||
"default_measures": [
|
||||
"Pruefsummen und digitale Signaturen fuer Datenuebertragungen",
|
||||
"Eingabevalidierung und Plausibilitaetspruefungen",
|
||||
"Versionierung und Change-Management-Verfahren",
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": "vertraulichkeit",
|
||||
"label": "Vertraulichkeit",
|
||||
"description": "Nur befugte Personen koennen personenbezogene Daten zur Kenntnis nehmen.",
|
||||
"default_measures": [
|
||||
"Verschluesselung: TLS 1.3 im Transit, AES-256 at Rest",
|
||||
"Rollenbasiertes Zugriffskonzept (RBAC) mit Least-Privilege-Prinzip",
|
||||
"Multi-Faktor-Authentifizierung fuer administrative Zugaenge",
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": "nichtverkettung",
|
||||
"label": "Nichtverkettung",
|
||||
"description": "Personenbezogene Daten werden nur fuer den Zweck verarbeitet, zu dem sie erhoben wurden.",
|
||||
"default_measures": [
|
||||
"Technische Zweckbindung durch Mandantentrennung",
|
||||
"Pseudonymisierung wo fachlich moeglich",
|
||||
"Getrennte Datenbanken / Schemata je Verarbeitungszweck",
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": "transparenz",
|
||||
"label": "Transparenz",
|
||||
"description": "Betroffene, der Verantwortliche und die Aufsichtsbehoerde koennen die Verarbeitung nachvollziehen.",
|
||||
"default_measures": [
|
||||
"Vollstaendiges Audit-Log aller Datenzugriffe und -aenderungen",
|
||||
"Verzeichnis der Verarbeitungstaetigkeiten (Art. 30 DS-GVO)",
|
||||
"Informationspflichten gemaess Art. 13/14 DS-GVO umgesetzt",
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": "intervenierbarkeit",
|
||||
"label": "Intervenierbarkeit",
|
||||
"description": "Betroffenenrechte (Auskunft, Berichtigung, Loeschung, Widerspruch) koennen wirksam ausgeuebt werden.",
|
||||
"default_measures": [
|
||||
"Self-Service-Portal oder dokumentierter Prozess fuer Betroffenenanfragen",
|
||||
"Technische Loeschfaehigkeit mit Nachweis (Loeschprotokoll)",
|
||||
"Datenexport in maschinenlesbarem Format (Art. 20 DS-GVO)",
|
||||
],
|
||||
},
|
||||
{
|
||||
"id": "datenminimierung",
|
||||
"label": "Datenminimierung",
|
||||
"description": "Die Verarbeitung beschraenkt sich auf das erforderliche Mass.",
|
||||
"default_measures": [
|
||||
"Regelmaessige Pruefung der Erforderlichkeit erhobener Datenfelder",
|
||||
"Automatisierte Loeschung nach Ablauf der Aufbewahrungsfrist",
|
||||
"Anonymisierung / Aggregation fuer statistische Zwecke",
|
||||
],
|
||||
},
|
||||
]


def generate_dsfa_draft(ctx: dict) -> dict:
    """Generate a DSFA draft document from template context.

    Args:
        ctx: Flat dict from company-profile/template-context endpoint.

    Returns:
        Dict with DSFA fields ready for creation via POST /dsfa.
    """
    company = ctx.get("company_name", "Unbekannt")
    dpo = ctx.get("dpo_name", "")
    dpo_email = ctx.get("dpo_email", "")
    federal_state = ctx.get("federal_state", "").lower().replace(" ", "-")

    # --- Section 0: Schwellwertanalyse ---
    schwellwert = _generate_schwellwertanalyse(ctx)

    # --- Section 1: Verarbeitungsbeschreibung ---
    section_1 = _generate_section_1(ctx, company, dpo, dpo_email)

    # --- Section 2: Notwendigkeit ---
    section_2 = _generate_section_2(ctx)

    # --- Section 3: Risikobewertung ---
    section_3 = _generate_risk_assessment(ctx)

    # --- Section 4: Stakeholder-Konsultation ---
    section_4 = _generate_section_4(ctx)

    # --- Section 5: TOM nach SDM ---
    section_5 = _generate_sdm_tom_section(ctx)

    # --- Section 6: DSB-Stellungnahme ---
    section_6 = _generate_section_6(ctx, dpo)

    # --- Section 7: Ergebnis ---
    section_7 = _generate_section_7(ctx)

    # --- Section 8: KI-Modul ---
    ai_systems = ctx.get("ai_systems", [])
    involves_ai = len(ai_systems) > 0
    section_8 = _generate_ai_module(ctx) if involves_ai else None

    sections = {
        "section_0": {"title": "Schwellwertanalyse", "content": schwellwert["content"]},
        "section_1": {"title": "Allgemeine Informationen und Verarbeitungsbeschreibung", "content": section_1},
        "section_2": {"title": "Notwendigkeit und Verhaeltnismaessigkeit", "content": section_2},
        "section_3": {"title": "Risikobewertung", "content": section_3},
        "section_4": {"title": "Konsultation der Betroffenen", "content": section_4},
        "section_5": {"title": "Technische und organisatorische Massnahmen (SDM)", "content": section_5},
        "section_6": {"title": "Stellungnahme des DSB", "content": section_6},
"section_7": {"title": "Ergebnis und Ueberprufungsplan", "content": section_7},
|
||||
    }
    if section_8:
        sections["section_8"] = {"title": "KI-spezifisches Modul (EU AI Act)", "content": section_8}

    # Assess Art. 36 consultation requirement
    art36_required = _assess_art36_consultation(ctx, schwellwert)

    return {
        "title": f"DSFA — {company}",
        "description": f"Datenschutz-Folgenabschaetzung fuer {company}",
        "status": "draft",
        "risk_level": "high" if involves_ai or schwellwert["criteria_met"] >= 3 else "medium",
        "involves_ai": involves_ai,
        "dpo_name": dpo,
        "federal_state": ctx.get("federal_state", ""),
        "sections": sections,
        "wp248_criteria_met": schwellwert["criteria_details"],
        "art35_abs3_triggered": schwellwert["art35_abs3"],
        "threshold_analysis": {
            "criteria_met_count": schwellwert["criteria_met"],
            "dsfa_required": schwellwert["dsfa_required"],
            "muss_liste_ref": schwellwert.get("muss_liste_ref", ""),
        },
        "consultation_requirement": {
            "art36_required": art36_required,
            "reason": "Restrisiko bleibt nach Massnahmen hoch" if art36_required else "Restrisiko akzeptabel",
        },
        "processing_systems": [s.get("name", "") for s in ctx.get("processing_systems", [])],
        "ai_systems_summary": [
            {"name": s.get("name"), "risk": s.get("risk_category", "unknown")}
            for s in ai_systems
        ],
    }


# -- Internal helpers --------------------------------------------------------

def _generate_schwellwertanalyse(ctx: dict) -> dict:
    """Evaluate 9 WP248 criteria against company profile."""
    criteria_details = []
    criteria_met = 0

    for criterion in WP248_CRITERIA:
        met = any(ctx.get(key) for key in criterion["ctx_keys"])
        criteria_details.append({
            "id": criterion["id"],
            "label": criterion["label"],
            "met": met,
        })
        if met:
            criteria_met += 1

    # Art. 35 Abs. 3 specific triggers
    art35_abs3 = []
    if ctx.get("has_profiling") and ctx.get("has_automated_decisions"):
        art35_abs3.append("Art. 35 Abs. 3 lit. a: Profiling mit Rechtswirkung")
    if any(ctx.get(k) for k in ["processes_health_data", "processes_biometric_data", "processes_criminal_data"]):
        if ctx.get("large_scale_processing"):
            art35_abs3.append("Art. 35 Abs. 3 lit. b: Umfangreiche Verarbeitung besonderer Kategorien")
    if ctx.get("has_surveillance"):
        art35_abs3.append("Art. 35 Abs. 3 lit. c: Systematische Ueberwachung oeffentlicher Bereiche")

    dsfa_required = criteria_met >= 2 or len(art35_abs3) > 0

    # Bundesland reference
    federal_state = ctx.get("federal_state", "").lower().replace(" ", "-")
    aufsicht_info = BUNDESLAND_AUFSICHT.get(federal_state, ("Nicht zugeordnet", "DSK Muss-Liste (allgemein)"))

    met_labels = [c["label"] for c in criteria_details if c["met"]]
    content_lines = [
        f"**Anzahl erfuellter WP248-Kriterien:** {criteria_met} von 9\n",
        f"**Erfuellte Kriterien:** {', '.join(met_labels) if met_labels else 'Keine'}\n",
    ]
    if art35_abs3:
        content_lines.append(f"**Art. 35 Abs. 3 DS-GVO direkt ausgeloest:** {'; '.join(art35_abs3)}\n")
    content_lines.append(
        f"\n**Ergebnis:** DSFA ist {'**erforderlich**' if dsfa_required else '**nicht erforderlich**'}."
    )
    if dsfa_required and criteria_met < 2:
        content_lines.append(" (Ausgeloest durch Art. 35 Abs. 3 DS-GVO)")

    return {
        "content": "\n".join(content_lines),
        "criteria_met": criteria_met,
        "criteria_details": criteria_details,
        "art35_abs3": art35_abs3,
        "dsfa_required": dsfa_required,
        "muss_liste_ref": aufsicht_info[1],
    }
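
# Decision rule in brief: a DSFA is required once at least two WP248 criteria
# are met OR any Art. 35 Abs. 3 trigger fires. Illustrative sketch:
#
#     _generate_schwellwertanalyse({"has_surveillance": True})["dsfa_required"]
#     # -> True (the lit. c trigger alone suffices, whatever the WP248 count)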


def _generate_section_1(ctx: dict, company: str, dpo: str, dpo_email: str) -> str:
    federal_state = ctx.get("federal_state", "")
    aufsicht = BUNDESLAND_AUFSICHT.get(
        federal_state.lower().replace(" ", "-"), ("Nicht zugeordnet",)
    )[0]

    lines = [
        f"**Verantwortlicher:** {company}",
        f"**Datenschutzbeauftragter:** {dpo}" + (f" ({dpo_email})" if dpo_email else ""),
        f"**Zustaendige Aufsichtsbehoerde:** {aufsicht}",
    ]

    systems = ctx.get("processing_systems", [])
    if systems:
        lines.append("\n**Eingesetzte Verarbeitungssysteme:**")
        for s in systems:
            hosting = s.get("hosting", "")
            lines.append(f"- {s.get('name', 'N/A')}" + (f" ({hosting})" if hosting else ""))

    return "\n".join(lines)


def _generate_section_2(ctx: dict) -> str:
    lines = [
        "### Notwendigkeit\n",
        "Die Verarbeitung ist zur Erreichung des beschriebenen Zwecks erforderlich. ",
        "Alternative, weniger eingriffsintensive Massnahmen wurden geprueft.\n",
        "### Datenminimierung\n",
        "Die verarbeiteten Datenkategorien beschraenken sich auf das fuer den ",
        "Verarbeitungszweck erforderliche Minimum (Art. 5 Abs. 1 lit. c DS-GVO).\n",
    ]
    return "".join(lines)


def _generate_risk_assessment(ctx: dict) -> str:
    lines = ["## Risikoanalyse\n"]

    # Standard risks
    risks = [
        ("Unbefugter Zugriff auf personenbezogene Daten", "mittel", "hoch", "hoch"),
        ("Datenverlust durch technischen Ausfall", "niedrig", "hoch", "mittel"),
        ("Fehlerhafte Verarbeitung / Datenqualitaet", "niedrig", "mittel", "niedrig"),
        ("Zweckentfremdung erhobener Daten", "niedrig", "hoch", "mittel"),
    ]

    if ctx.get("has_ai_systems") or ctx.get("uses_ai"):
        risks.append(("Diskriminierung durch algorithmische Entscheidungen", "mittel", "hoch", "hoch"))
        risks.append(("Mangelnde Erklaerbarkeit von KI-Entscheidungen", "mittel", "mittel", "mittel"))

    if ctx.get("processes_health_data"):
        risks.append(("Offenlegung von Gesundheitsdaten", "niedrig", "gross", "hoch"))

    if any(ctx.get(k) for k in ["third_country_transfer", "processes_in_third_country"]):
        risks.append(("Zugriff durch Behoerden in Drittlaendern", "mittel", "hoch", "hoch"))

    # FISA 702 Risiko bei US-Cloud-Providern
    hosting = (ctx.get("hosting_provider") or "").lower()
    us_providers = ("aws", "azure", "google", "microsoft", "amazon", "openai", "anthropic", "oracle")
    if any(p in hosting for p in us_providers):
        risks.append(("FISA 702: Zugriff durch US-Behoerden auf EU-Daten nicht ausschliessbar", "mittel", "hoch", "hoch"))
        risks.append(("EU-Serverstandort schuetzt nicht gegen US-Rechtszugriff (Cloud Act + FISA)", "mittel", "hoch", "hoch"))
        risks.append(("Fehlende effektive Rechtsbehelfe fuer EU-Betroffene gegen US-Ueberwachung", "mittel", "hoch", "hoch"))

    # Domain-spezifische Risiken (AI Act Annex III)
    domain = ctx.get("domain", "")
    if domain in ("hr", "recruiting") or ctx.get("has_hr_context"):
        risks.append(("AGG-Verstoss: Diskriminierung bei Bewerberauswahl (§ 1 AGG)", "mittel", "hoch", "hoch"))
        risks.append(("Beweislastumkehr bei Diskriminierungsklagen (§ 22 AGG)", "mittel", "hoch", "hoch"))
        risks.append(("Art. 22 DSGVO: Unzulaessige automatisierte Einzelentscheidung", "mittel", "hoch", "hoch"))
        risks.append(("Proxy-Diskriminierung durch Name/Foto/Alter-Erkennung", "mittel", "hoch", "hoch"))

    if domain in ("education", "higher_education", "vocational_training"):
        risks.append(("Chancenungleichheit durch KI-gestuetzte Bewertung", "mittel", "hoch", "hoch"))
        risks.append(("Benachteiligung Minderjaehriger ohne Lehrkraft-Kontrolle", "niedrig", "gross", "hoch"))
        risks.append(("Fehlbewertung mit Auswirkung auf Bildungschancen", "mittel", "hoch", "hoch"))

    if domain in ("healthcare", "medical_devices", "pharma", "elderly_care"):
        risks.append(("Fehldiagnose durch KI mit gesundheitlichen Folgen", "niedrig", "gross", "hoch"))
        risks.append(("Falsche Triage-Priorisierung (lebenskritisch)", "niedrig", "gross", "hoch"))
        risks.append(("Verletzung der Patientenautonomie", "mittel", "hoch", "hoch"))

    if domain in ("finance", "banking", "insurance", "investment"):
        risks.append(("Diskriminierendes Kredit-Scoring", "mittel", "hoch", "hoch"))
        risks.append(("Ungerechtfertigte Verweigerung von Finanzdienstleistungen", "mittel", "hoch", "hoch"))

    lines.append("| Risiko | Eintrittswahrscheinlichkeit | Schwere | Gesamt |")
    lines.append("|--------|----------------------------|---------|--------|")
    for risk_name, likelihood, severity, overall in risks:
        lines.append(f"| {risk_name} | {likelihood} | {severity} | **{overall}** |")

    lines.append("")

    high_risks = sum(1 for _, _, _, o in risks if o == "hoch")
    if high_risks > 0:
        lines.append(f"\n**{high_risks} Risiken mit Stufe 'hoch' identifiziert.** "
                     "Massnahmen gemaess Abschnitt 5 reduzieren das Restrisiko.")

    return "\n".join(lines)


def _generate_section_4(ctx: dict) -> str:
    lines = []
    if ctx.get("has_works_council"):
        lines.append("Der Betriebsrat wurde informiert und angehoert.")
    lines.append(
        "Eine Konsultation der Betroffenen gemaess Art. 35 Abs. 9 DS-GVO "
        "wird empfohlen, soweit verhaeltnismaessig und praktikabel."
    )
    return "\n".join(lines)


def _generate_sdm_tom_section(ctx: dict) -> str:
    """Generate TOM section structured by 7 SDM Gewaehrleistungsziele."""
    lines = []
    for goal in SDM_GOALS:
        lines.append(f"**{goal['label']}** — {goal['description']}\n")
        lines.append("| Massnahme | Typ | Status |")
        lines.append("|-----------|-----|--------|")
        for measure in goal["default_measures"]:
            mtype = "technisch" if any(
                kw in measure.lower()
                for kw in ["verschluesselung", "backup", "redundanz", "tls", "aes", "rbac", "mfa",
                           "pruefsumm", "validierung", "loeschfaehigkeit", "export", "automatisiert"]
            ) else "organisatorisch"
            lines.append(f"| {measure} | {mtype} | geplant |")
        lines.append("")
    return "\n".join(lines)


def _generate_section_6(ctx: dict, dpo: str) -> str:
    if dpo:
        return (
            f"Der Datenschutzbeauftragte ({dpo}) wurde konsultiert. "
            "Die Stellungnahme liegt bei bzw. wird nachgereicht."
        )
    return (
        "Ein Datenschutzbeauftragter wurde noch nicht benannt. "
        "Sofern eine Benennungspflicht besteht (Art. 37 DS-GVO), "
        "ist dies vor Abschluss der DSFA nachzuholen."
    )


def _generate_section_7(ctx: dict) -> str:
    review_months = ctx.get("review_cycle_months", 12)
    lines = [
        "### Ergebnis\n",
        "Die DSFA wurde gemaess Art. 35 DS-GVO durchgefuehrt. Die identifizierten Risiken ",
        "wurden bewertet und durch geeignete Massnahmen auf ein akzeptables Niveau reduziert.\n",
"### Ueberprufungsplan\n",
|
||||
f"- **Regelmaessige Ueberprufung:** alle {review_months} Monate\n",
|
||||
"- **Trigger fuer ausserplanmaessige Ueberprufung:**\n",
|
||||
" - Wesentliche Aenderung der Verarbeitungstaetigkeit\n",
|
||||
" - Neue oder geaenderte Rechtsgrundlage\n",
|
||||
" - Sicherheitsvorfall mit Bezug zur Verarbeitung\n",
|
||||
" - Aenderung der eingesetzten Technologie oder Auftragsverarbeiter\n",
|
||||
" - Neue Erkenntnisse zu Risiken oder Bedrohungen\n",
|
||||
]
|
||||
return "".join(lines)
|
||||
|
||||
|
||||
def _generate_ai_module(ctx: dict) -> str:
|
||||
"""Generate Section 8 for AI systems (EU AI Act)."""
|
||||
lines = ["### Eingesetzte KI-Systeme\n"]
|
||||
|
||||
ai_systems = ctx.get("ai_systems", [])
|
||||
if ai_systems:
|
||||
lines.append("| System | Zweck | Risikokategorie | Human Oversight |")
|
||||
lines.append("|--------|-------|-----------------|-----------------|")
|
||||
for s in ai_systems:
|
||||
risk = s.get("risk_category", "unbekannt")
|
||||
oversight = "Ja" if s.get("has_human_oversight") else "Nein"
|
||||
lines.append(f"| {s.get('name', 'N/A')} | {s.get('purpose', 'N/A')} | {risk} | {oversight} |")
|
||||
lines.append("")
|
||||
|
||||
if ctx.get("subject_to_ai_act"):
|
||||
lines.append(
|
||||
"**Hinweis:** Das Unternehmen unterliegt dem EU AI Act (Verordnung (EU) 2024/1689). "
|
||||
"Fuer Hochrisiko-KI-Systeme ist eine grundrechtliche Folgenabschaetzung "
|
||||
"gemaess Art. 27 KI-VO durchzufuehren.\n"
|
||||
)
|
||||
|
||||
high_risk = [s for s in ai_systems if s.get("risk_category") in ("high", "hoch")]
|
||||
if high_risk:
|
||||
lines.append("### Hochrisiko-KI-Systeme — Zusatzanforderungen\n")
|
||||
lines.append("Fuer die folgenden Systeme gelten die Anforderungen aus Kapitel III KI-VO:\n")
|
||||
for s in high_risk:
|
||||
lines.append(f"- **{s.get('name', 'N/A')}**: Risikomanagement (Art. 9), "
|
||||
f"Daten-Governance (Art. 10), Transparenz (Art. 13), "
|
||||
f"Human Oversight (Art. 14)\n")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def _assess_art36_consultation(ctx: dict, schwellwert: dict) -> bool:
|
||||
"""Determine if Art. 36 DSGVO consultation with supervisory authority is required.
|
||||
|
||||
Art. 36 requires prior consultation when the DSFA indicates that the processing
|
||||
would result in a HIGH residual risk despite mitigation measures.
|
||||
"""
|
||||
if schwellwert["criteria_met"] >= 4:
|
||||
return True
|
||||
if len(schwellwert.get("art35_abs3", [])) >= 2:
|
||||
return True
|
||||
ai_systems = ctx.get("ai_systems", [])
|
||||
high_risk_ai = [s for s in ai_systems if s.get("risk_category") in ("high", "hoch", "unacceptable")]
|
||||
if len(high_risk_ai) >= 2:
|
||||
return True
|
||||
return False
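

# Smoke-test sketch (illustrative, not part of the original module); the
# context keys mirror those read above, the values are made up.
if __name__ == "__main__":
    demo_ctx = {
        "company_name": "Beispiel GmbH",
        "dpo_name": "Max Mustermann",
        "federal_state": "Bayern",
        "has_profiling": True,
        "has_automated_decisions": True,
        "ai_systems": [{"name": "Scoring-KI", "risk_category": "high"}],
    }
    draft = generate_dsfa_draft(demo_ctx)
    # Profiling plus automated decisions fire Art. 35 Abs. 3 lit. a, so the
    # threshold analysis must flag the DSFA as required.
    assert draft["threshold_analysis"]["dsfa_required"] is True
    # An AI system is present, so the draft is classified high-risk and
    # carries the KI module as section_8.
    assert draft["risk_level"] == "high"
    assert "section_8" in draft["sections"]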
227
document-templates/generators/fria_template.py
Normal file
@@ -0,0 +1,227 @@
"""FRIA template generator — creates Fundamental Rights Impact Assessment from UCCA context.

Generates a FRIA (Art. 27 AI Act) based on:
- UCCA Assessment result (risk level, triggered rules, domain)
- AI Act Decision Tree classification
- Company profile

Automatically maps domains to affected fundamental rights.
"""

from typing import Optional

# -- Domain → Fundamental Rights Mapping ------------------------------------

DOMAIN_RIGHTS_MAP = {
    "education": [
        {"right": "Recht auf Bildung", "charter": "Art. 14", "gg": "Art. 12",
         "risk": "Chancengleichheit bei KI-gestuetzter Bewertung oder Auswahl"},
        {"right": "Nicht-Diskriminierung", "charter": "Art. 21", "gg": "Art. 3",
         "risk": "Bias bei Leistungsbewertung nach Herkunft, Sprache oder Geschlecht"},
        {"right": "Rechte des Kindes", "charter": "Art. 24", "gg": "Art. 6 Abs. 2",
         "risk": "Besonderer Schutz Minderjaehriger vor automatisierten Bewertungen"},
    ],
    "hr": [
        {"right": "Berufsfreiheit / Recht zu arbeiten", "charter": "Art. 15", "gg": "Art. 12",
         "risk": "KI-gestuetzte Auswahl kann Zugang zum Arbeitsmarkt einschraenken"},
        {"right": "Nicht-Diskriminierung", "charter": "Art. 21", "gg": "Art. 3",
         "risk": "Bias bei Recruiting, Befoerderung oder Kuendigung"},
        {"right": "Schutz personenbezogener Daten", "charter": "Art. 8", "gg": "Art. 2 Abs. 1",
         "risk": "Umfangreiche Verarbeitung von Beschaeftigtendaten"},
    ],
    "healthcare": [
        {"right": "Menschenwuerde", "charter": "Art. 1", "gg": "Art. 1",
         "risk": "KI-Diagnosen koennen existenzielle Auswirkungen haben"},
        {"right": "Schutz personenbezogener Daten", "charter": "Art. 8", "gg": "Art. 2 Abs. 1",
         "risk": "Gesundheitsdaten sind besondere Kategorien (Art. 9 DSGVO)"},
        {"right": "Nicht-Diskriminierung", "charter": "Art. 21", "gg": "Art. 3",
         "risk": "Bias bei Behandlungsempfehlungen nach Alter, Geschlecht oder Ethnie"},
    ],
    "finance": [
        {"right": "Recht auf soziale Sicherheit", "charter": "Art. 34", "gg": "Art. 20",
         "risk": "Zugang zu Finanzdienstleistungen und Versicherungen"},
        {"right": "Nicht-Diskriminierung", "charter": "Art. 21", "gg": "Art. 3",
         "risk": "Scoring-Bias bei Kreditvergabe oder Versicherungspraemien"},
        {"right": "Recht auf wirksamen Rechtsbehelf", "charter": "Art. 47", "gg": "Art. 19 Abs. 4",
         "risk": "Anfechtbarkeit automatisierter Finanzentscheidungen"},
    ],
    "law_enforcement": [
        {"right": "Recht auf Freiheit und Sicherheit", "charter": "Art. 6", "gg": "Art. 2 Abs. 2",
         "risk": "KI-gestuetzte Ueberwachung oder Vorhersage"},
        {"right": "Unschuldsvermutung", "charter": "Art. 48", "gg": "Art. 20 Abs. 3",
         "risk": "Predictive Policing kann Vorverurteilung erzeugen"},
        {"right": "Recht auf Privatsphaere", "charter": "Art. 7", "gg": "Art. 2 Abs. 1",
         "risk": "Biometrische Identifizierung im oeffentlichen Raum"},
    ],
    "public_sector": [
        {"right": "Recht auf eine gute Verwaltung", "charter": "Art. 41", "gg": "Art. 20 Abs. 3",
         "risk": "Automatisierte Verwaltungsentscheidungen muessen nachvollziehbar sein"},
        {"right": "Nicht-Diskriminierung", "charter": "Art. 21", "gg": "Art. 3",
         "risk": "Gleichbehandlung aller Buerger bei KI-gestuetzten Verwaltungsakten"},
        {"right": "Recht auf wirksamen Rechtsbehelf", "charter": "Art. 47", "gg": "Art. 19 Abs. 4",
         "risk": "Widerspruchsmoeglichkeit gegen KI-gestuetzte Bescheide"},
    ],
}

# Universal rights (always relevant for High-Risk AI)
UNIVERSAL_RIGHTS = [
    {"right": "Schutz personenbezogener Daten", "charter": "Art. 8", "gg": "Art. 2 Abs. 1 i.V.m. Art. 1 Abs. 1",
     "risk": "Datenverarbeitung durch KI-System"},
    {"right": "Menschenwuerde", "charter": "Art. 1", "gg": "Art. 1",
     "risk": "KI darf Menschen nicht auf Datenpunkte reduzieren"},
]

# -- Default measures -------------------------------------------------------

DEFAULT_MEASURES = [
    "Human-in-the-Loop: Menschliche Ueberpruefung aller KI-Empfehlungen vor Umsetzung",
    "Transparenz: Betroffene werden ueber den Einsatz von KI informiert",
    "Erklaerbarkeit: KI-Ergebnisse koennen nachvollzogen und begruendet werden",
    "Beschwerdemechanismus: Betroffene koennen KI-Entscheidungen anfechten",
    "Logging: Alle Eingaben und Ausgaben werden fuer Audit-Zwecke protokolliert",
    "Regelmaessige Bias-Audits: Systematische Pruefung auf Diskriminierung",
]

HR_MEASURES = [
    "AGG-konforme Gestaltung: Kein Bias bei Geschlecht, Alter, Herkunft, Behinderung",
    "Betriebsrat gemaess § 87 Abs. 1 Nr. 6 und § 95 BetrVG beteiligt",
    "Keine automatisierte Endentscheidung bei Personalangelegenheiten",
]

EDUCATION_MEASURES = [
    "Lehrkraft ueberprueft und verantwortet alle KI-generierten Bewertungen",
    "Chancengleichheit unabhaengig von sozioekonomischem Hintergrund",
    "Schueler/Eltern koennen KI-gestuetzte Bewertungen anfechten",
]


def generate_fria_draft(ctx: dict) -> dict:
    """Generate a FRIA draft from UCCA assessment context.

    Args:
        ctx: Dict with keys:
            Required:
            - organisation_name: str
            - system_name: str
            - system_description: str
            - einsatzzweck: str
            Optional:
            - organisation_address: str
            - system_version: str
            - system_provider: str
            - domain: str (education, hr, healthcare, finance, etc.)
            - affected_groups: list[str]
            - affected_count: str
            - ai_act_classification: str (high_risk, limited_risk, etc.)
            - annex_iii_category: str
            - is_public_entity: bool
            - has_hr_context: bool
            - has_education_context: bool
            - risk_score: int
            - dpo_name: str
            - dpo_contact: str
            - review_interval: str

    Returns:
        Dict with placeholder values for template substitution.
    """
    result = {}

    # Section 1: Basic info
    result["ORGANISATION_NAME"] = ctx.get("organisation_name", "{{ORGANISATION_NAME}}")
    result["ORGANISATION_ADRESSE"] = ctx.get("organisation_address", "{{ORGANISATION_ADRESSE}}")
    result["VERANTWORTLICHER"] = ctx.get("responsible_person", "{{VERANTWORTLICHER}}")
    result["ERSTELLT_VON"] = ctx.get("created_by", "{{ERSTELLT_VON}}")
    result["ERSTELLT_AM"] = ctx.get("created_at", "{{ERSTELLT_AM}}")
    result["SYSTEM_NAME"] = ctx.get("system_name", "{{SYSTEM_NAME}}")
    result["SYSTEM_VERSION"] = ctx.get("system_version", "1.0")
    result["SYSTEM_BESCHREIBUNG"] = ctx.get("system_description", "{{SYSTEM_BESCHREIBUNG}}")
    result["SYSTEM_ANBIETER"] = ctx.get("system_provider", "{{SYSTEM_ANBIETER}}")
    result["EINSATZZWECK"] = ctx.get("einsatzzweck", "{{EINSATZZWECK}}")
    result["EINSATZKONTEXT"] = ctx.get("einsatzkontext", "{{EINSATZKONTEXT}}")
    result["AI_ACT_KLASSIFIKATION"] = ctx.get("ai_act_classification", "High-Risk")
    result["ANNEX_III_KATEGORIE"] = ctx.get("annex_iii_category", "")
    result["DSB_NAME"] = ctx.get("dpo_name", "{{DSB_NAME}}")
    result["DSB_KONTAKT"] = ctx.get("dpo_contact", "{{DSB_KONTAKT}}")

    # Section 1.5: Affected groups
    groups = ctx.get("affected_groups", [])
    result["BETROFFENE_GRUPPEN"] = _bullet_list(groups) if groups else "{{BETROFFENE_GRUPPEN}}"
    result["BETROFFENE_ANZAHL"] = ctx.get("affected_count", "{{BETROFFENE_ANZAHL}}")

    # Section 2: Fundamental rights mapping
    domain = ctx.get("domain", "")
    rights = list(UNIVERSAL_RIGHTS)
    if domain in DOMAIN_RIGHTS_MAP:
        rights.extend(DOMAIN_RIGHTS_MAP[domain])

    rights_table = []
    for i, r in enumerate(rights, 1):
        rights_table.append(
            f"| {i} | {r['right']} | {r['charter']} | {r['gg']} | Ja | {r['risk']} |"
        )
    result["GRUNDRECHTE_ANALYSE"] = "\n".join(rights_table) if rights_table else "{{GRUNDRECHTE_ANALYSE}}"

    # Section 3: Risk matrix
    risk_rows = []
    risk_score = ctx.get("risk_score", 0)
    base_likelihood = min(3, 1 + risk_score // 30)  # e.g. risk_score 60 -> min(3, 1 + 2) = 3
    for r in rights:
        severity = 3 if "Diskriminierung" in r["risk"] or "existenz" in r["risk"].lower() else 2
        likelihood = base_likelihood
        level = _risk_level(likelihood * severity)
        risk_rows.append(
            f"| {r['right']} | {r['risk']} | {likelihood} | {severity} | {level} | Basierend auf Systemanalyse |"
        )
    result["RISIKOMATRIX"] = "\n".join(risk_rows) if risk_rows else "{{RISIKOMATRIX}}"

    # Section 4: Measures
    measures = list(DEFAULT_MEASURES)
    if ctx.get("has_hr_context") or domain == "hr":
        measures.extend(HR_MEASURES)
    if ctx.get("has_education_context") or domain == "education":
        measures.extend(EDUCATION_MEASURES)
    result["MASSNAHMEN_LISTE"] = _bullet_list(measures)

    result["HUMAN_OVERSIGHT_BESCHREIBUNG"] = ctx.get(
        "human_oversight",
        "Das System unterstuetzt menschliche Entscheidungen, trifft jedoch keine eigenstaendigen Entscheidungen. "
        "Alle KI-generierten Empfehlungen werden von qualifiziertem Personal geprueft.")

    result["TRANSPARENZ_MASSNAHMEN"] = ctx.get(
        "transparency_measures",
        "Betroffene Personen werden ueber den Einsatz des KI-Systems informiert. "
        "KI-generierte Ergebnisse werden als solche gekennzeichnet.")

    # Section 5: Consultation
    result["KONSULTATION_ERGEBNISSE"] = ctx.get(
        "consultation_results",
        "Konsultation steht aus — bitte vor Freigabe durchfuehren.")

    # Section 6: Approval
    result["GENEHMIGT_VON"] = ctx.get("approved_by", "{{GENEHMIGT_VON}}")
    result["GENEHMIGT_AM"] = ctx.get("approved_at", "{{GENEHMIGT_AM}}")

    # Section 7: Monitoring
    result["NAECHSTE_UEBERPRUEFUNG"] = ctx.get("review_interval", "12 Monate nach Inbetriebnahme")

    # Conditional flags
    result["BILDUNGSKONTEXT"] = ctx.get("has_education_context", False) or domain == "education"
    result["HR_KONTEXT"] = ctx.get("has_hr_context", False) or domain == "hr"
    result["OEFFENTLICHE_STELLE"] = ctx.get("is_public_entity", False)

    return result


def _risk_level(score: int) -> str:
    """Map risk score to level label."""
    if score <= 6:
        return "Niedrig"
    elif score <= 12:
        return "Mittel"
    elif score <= 19:
        return "Hoch"
    else:
        return "Kritisch"


def _bullet_list(items: list) -> str:
    """Format a list as markdown bullet points."""
    return "\n".join(f"- {item}" for item in items)
158
document-templates/generators/test_betriebsvereinbarung.py
Normal file
@@ -0,0 +1,158 @@
"""Tests for Betriebsvereinbarung template generator."""

import pytest

from betriebsvereinbarung_template import (
    generate_betriebsvereinbarung_draft,
    DEFAULT_VERBOTENE_NUTZUNGEN,
    AI_VERBOTENE_NUTZUNGEN,
    DEFAULT_TOM,
    DATENARTEN_MAP,
)


class TestGenerateBetriebsvereinbarung:
    """Tests for generate_betriebsvereinbarung_draft()."""

    def test_minimal_context(self):
        """Minimal context should produce valid output with placeholders."""
        ctx = {
            "company_name": "Test GmbH",
            "system_name": "Microsoft 365",
            "system_description": "Office-Suite mit KI-Funktionen",
        }
        result = generate_betriebsvereinbarung_draft(ctx)

        assert result["UNTERNEHMEN_NAME"] == "Test GmbH"
        assert result["SYSTEM_NAME"] == "Microsoft 365"
        assert "{{BETRIEBSRAT_VORSITZ}}" in result["BETRIEBSRAT_VORSITZ"]

    def test_full_context(self):
        """Full context should fill all placeholders."""
        ctx = {
            "company_name": "Acme Corp",
            "company_address": "Hamburg",
            "employer_representative": "Dr. Schmidt",
            "works_council_chair": "Fr. Mueller",
            "system_name": "Copilot",
            "system_description": "KI-Assistent",
            "system_vendor": "Microsoft",
            "locations": ["Hamburg", "Berlin"],
            "departments": ["IT", "HR"],
            "modules": ["Teams", "Outlook", "Word"],
            "purposes": ["Texterstellung", "Zusammenfassung"],
            "data_types": ["email", "chat", "login"],
            "is_ai_system": True,
            "dpo_name": "Dr. Datenschutz",
            "dpo_contact": "dsb@acme.de",
            "audit_interval": "6 Monate",
            "duration": "2 Jahre",
            "notice_period": "6 Monate",
        }
        result = generate_betriebsvereinbarung_draft(ctx)

        assert result["ARBEITGEBER_VERTRETER"] == "Dr. Schmidt"
        assert result["BETRIEBSRAT_VORSITZ"] == "Fr. Mueller"
        assert "Hamburg" in result["GELTUNGSBEREICH_STANDORTE"]
        assert "Berlin" in result["GELTUNGSBEREICH_STANDORTE"]
        assert "Teams" in result["GELTUNGSBEREICH_MODULE"]
        assert result["AUDIT_INTERVALL"] == "6 Monate"
        assert result["LAUFZEIT"] == "2 Jahre"
        assert result["AI_SYSTEM"] is True

    def test_verbotene_nutzungen_default(self):
        """Default forbidden uses should always be included."""
        ctx = {"company_name": "Test", "system_name": "Tool", "system_description": "x"}
        result = generate_betriebsvereinbarung_draft(ctx)

        for nutzung in DEFAULT_VERBOTENE_NUTZUNGEN:
            assert nutzung in result["VERBOTENE_NUTZUNGEN"]

    def test_verbotene_nutzungen_ai_system(self):
        """AI-specific forbidden uses should be added for AI systems."""
        ctx = {
            "company_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "is_ai_system": True,
        }
        result = generate_betriebsvereinbarung_draft(ctx)

        for nutzung in AI_VERBOTENE_NUTZUNGEN:
            assert nutzung in result["VERBOTENE_NUTZUNGEN"]

    def test_verbotene_nutzungen_no_ai(self):
        """AI-specific forbidden uses should NOT be added for non-AI systems."""
        ctx = {
            "company_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "is_ai_system": False,
        }
        result = generate_betriebsvereinbarung_draft(ctx)

        for nutzung in AI_VERBOTENE_NUTZUNGEN:
            assert nutzung not in result["VERBOTENE_NUTZUNGEN"]

    def test_datenarten_mapping(self):
        """Data types should be resolved from DATENARTEN_MAP."""
        ctx = {
            "company_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "data_types": ["email", "prompt", "hr"],
        }
        result = generate_betriebsvereinbarung_draft(ctx)

        assert DATENARTEN_MAP["email"] in result["DATENARTEN_LISTE"]
        assert DATENARTEN_MAP["prompt"] in result["DATENARTEN_LISTE"]
        assert DATENARTEN_MAP["hr"] in result["DATENARTEN_LISTE"]

    def test_tom_high_conflict_score(self):
        """High conflict score should add extra TOM measures."""
        ctx_low = {
            "company_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "betrvg_conflict_score": 20,
        }
        ctx_high = {
            "company_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "betrvg_conflict_score": 80,
        }

        result_low = generate_betriebsvereinbarung_draft(ctx_low)
        result_high = generate_betriebsvereinbarung_draft(ctx_high)

        # High score should have more TOM items
        low_count = result_low["TOM_MASSNAHMEN"].count("- ")
        high_count = result_high["TOM_MASSNAHMEN"].count("- ")
        assert high_count > low_count, f"High conflict ({high_count} TOMs) should have more than low ({low_count})"

    def test_speicherfristen_defaults(self):
        """Default retention periods should be set."""
        ctx = {"company_name": "Test", "system_name": "Tool", "system_description": "x"}
        result = generate_betriebsvereinbarung_draft(ctx)

        assert result["SPEICHERFRIST_AUDIT_LOGS"] == "90 Tage"
        assert result["SPEICHERFRIST_NUTZUNGSDATEN"] == "30 Tage"
        assert result["SPEICHERFRIST_CHAT_PROMPTS"] == "deaktiviert"

    def test_custom_retention(self):
        """Custom retention periods should override defaults."""
        ctx = {
            "company_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "retention_audit_logs": "180 Tage",
            "retention_prompts": "7 Tage",
        }
        result = generate_betriebsvereinbarung_draft(ctx)

        assert result["SPEICHERFRIST_AUDIT_LOGS"] == "180 Tage"
        assert result["SPEICHERFRIST_CHAT_PROMPTS"] == "7 Tage"


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
198
document-templates/generators/test_fria_template.py
Normal file
@@ -0,0 +1,198 @@
"""Tests for FRIA (Fundamental Rights Impact Assessment) template generator."""

import pytest

from fria_template import (
    generate_fria_draft,
    DOMAIN_RIGHTS_MAP,
    UNIVERSAL_RIGHTS,
    DEFAULT_MEASURES,
    HR_MEASURES,
    EDUCATION_MEASURES,
)


class TestGenerateFRIA:
    """Tests for generate_fria_draft()."""

    def test_minimal_context(self):
        ctx = {
            "organisation_name": "Test GmbH",
            "system_name": "AI Tool",
            "system_description": "KI-Assistenz",
            "einsatzzweck": "Automatisierung",
        }
        result = generate_fria_draft(ctx)
        assert result["ORGANISATION_NAME"] == "Test GmbH"
        assert result["SYSTEM_NAME"] == "AI Tool"
        assert result["AI_ACT_KLASSIFIKATION"] == "High-Risk"

    def test_hr_domain_rights(self):
        ctx = {
            "organisation_name": "HR Corp",
            "system_name": "Recruiting AI",
            "system_description": "Bewerber-Screening",
            "einsatzzweck": "Personalauswahl",
            "domain": "hr",
        }
        result = generate_fria_draft(ctx)

        # HR domain should include employment rights
        assert "Berufsfreiheit" in result["GRUNDRECHTE_ANALYSE"]
        assert "Nicht-Diskriminierung" in result["GRUNDRECHTE_ANALYSE"]
        assert result["HR_KONTEXT"] is True
        assert result["BILDUNGSKONTEXT"] is False

    def test_education_domain_rights(self):
        ctx = {
            "organisation_name": "Schule",
            "system_name": "Bewertungs-KI",
            "system_description": "Notenunterstuetzung",
            "einsatzzweck": "Leistungsbewertung",
            "domain": "education",
        }
        result = generate_fria_draft(ctx)

        assert "Recht auf Bildung" in result["GRUNDRECHTE_ANALYSE"]
        assert "Rechte des Kindes" in result["GRUNDRECHTE_ANALYSE"]
        assert result["BILDUNGSKONTEXT"] is True

    def test_healthcare_domain_rights(self):
        ctx = {
            "organisation_name": "Klinik",
            "system_name": "Diagnose-KI",
            "system_description": "Diagnoseunterstuetzung",
            "einsatzzweck": "Diagnostik",
            "domain": "healthcare",
        }
        result = generate_fria_draft(ctx)

        assert "Menschenwuerde" in result["GRUNDRECHTE_ANALYSE"]
        assert "Schutz personenbezogener Daten" in result["GRUNDRECHTE_ANALYSE"]

    def test_universal_rights_always_present(self):
        for domain in ["hr", "education", "healthcare", "finance", ""]:
            ctx = {
                "organisation_name": "Test",
                "system_name": "Tool",
                "system_description": "x",
                "einsatzzweck": "y",
                "domain": domain,
            }
            result = generate_fria_draft(ctx)
            assert "Schutz personenbezogener Daten" in result["GRUNDRECHTE_ANALYSE"]

    def test_hr_measures_included(self):
        ctx = {
            "organisation_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "einsatzzweck": "y",
            "domain": "hr",
        }
        result = generate_fria_draft(ctx)

        for measure in HR_MEASURES:
            assert measure in result["MASSNAHMEN_LISTE"]

    def test_education_measures_included(self):
        ctx = {
            "organisation_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "einsatzzweck": "y",
            "domain": "education",
        }
        result = generate_fria_draft(ctx)

        for measure in EDUCATION_MEASURES:
            assert measure in result["MASSNAHMEN_LISTE"]

    def test_public_entity_flag(self):
        ctx = {
            "organisation_name": "Behoerde",
            "system_name": "Tool",
            "system_description": "x",
            "einsatzzweck": "y",
            "is_public_entity": True,
        }
        result = generate_fria_draft(ctx)
        assert result["OEFFENTLICHE_STELLE"] is True

    def test_risk_matrix_generated(self):
        ctx = {
            "organisation_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "einsatzzweck": "y",
            "domain": "hr",
            "risk_score": 60,
        }
        result = generate_fria_draft(ctx)

        assert result["RISIKOMATRIX"] != "{{RISIKOMATRIX}}"
        assert "Nicht-Diskriminierung" in result["RISIKOMATRIX"]

    def test_affected_groups(self):
        ctx = {
            "organisation_name": "Test",
            "system_name": "Tool",
            "system_description": "x",
            "einsatzzweck": "y",
            "affected_groups": ["Bewerber", "Beschaeftigte"],
            "affected_count": "~500 pro Jahr",
        }
        result = generate_fria_draft(ctx)

        assert "Bewerber" in result["BETROFFENE_GRUPPEN"]
        assert result["BETROFFENE_ANZAHL"] == "~500 pro Jahr"


class TestDSFADomainRisks:
    """Tests for domain-specific risks in DSFA generator."""

    def test_hr_domain_adds_agg_risks(self):
        # Import from dsfa_template
        from dsfa_template import _generate_risk_assessment

        ctx = {"has_ai_systems": True, "domain": "hr"}
        output = _generate_risk_assessment(ctx)

        assert "AGG-Verstoss" in output
        assert "Beweislastumkehr" in output

    def test_education_domain_adds_risks(self):
        from dsfa_template import _generate_risk_assessment

        ctx = {"has_ai_systems": True, "domain": "education"}
        output = _generate_risk_assessment(ctx)

        assert "Chancenungleichheit" in output

    def test_healthcare_domain_adds_risks(self):
        from dsfa_template import _generate_risk_assessment

        ctx = {"has_ai_systems": True, "domain": "healthcare"}
        output = _generate_risk_assessment(ctx)

        assert "Fehldiagnose" in output

    def test_finance_domain_adds_risks(self):
        from dsfa_template import _generate_risk_assessment

        ctx = {"has_ai_systems": True, "domain": "finance"}
        output = _generate_risk_assessment(ctx)

        assert "Kredit-Scoring" in output

    def test_no_domain_no_extra_risks(self):
        from dsfa_template import _generate_risk_assessment

        ctx = {"has_ai_systems": True}
        output = _generate_risk_assessment(ctx)

        assert "AGG-Verstoss" not in output
        assert "Fehldiagnose" not in output


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
285
document-templates/generators/tom_template.py
Normal file
@@ -0,0 +1,285 @@
"""TOM template generator V2 — SDM-structured TOM catalog.

Replaces the flat 17-measure list with a hierarchical structure based on
the 7 SDM Gewaehrleistungsziele (Standard-Datenschutzmodell V3.1a).
"""

# -- SDM-structured TOM catalog ---------------------------------------------

SDM_TOM_CATALOG = {
    "verfuegbarkeit": {
        "label": "Verfuegbarkeit",
        "sdm_baustein": "SDM-B11 (Aufbewahren)",
        "measures": [
            {"name": "Redundante Datenhaltung", "description": "RAID, Replikation, Geo-Redundanz", "type": "technical"},
            {"name": "Backup-Strategie", "description": "Taeglich inkrementell, woechentlich voll, verschluesselt", "type": "technical"},
            {"name": "Disaster-Recovery-Plan", "description": "Dokumentierte RTO/RPO-Werte, jaehrliche Tests", "type": "organizational"},
            {"name": "USV / Notstromversorgung", "description": "Unterbrechungsfreie Stromversorgung fuer kritische Systeme", "type": "technical"},
        ],
    },
    "integritaet": {
        "label": "Integritaet",
        "sdm_baustein": "SDM-B61 (Berichtigen)",
        "measures": [
            {"name": "Pruefsummen und Signaturen", "description": "Digitale Signaturen fuer Datenuebertragungen", "type": "technical"},
            {"name": "Eingabevalidierung", "description": "Plausibilitaetspruefungen auf allen Eingabeschnittstellen", "type": "technical"},
            {"name": "Change Management", "description": "Dokumentierte Aenderungsverfahren mit Freigabeprozess", "type": "organizational"},
            {"name": "Versionierung", "description": "Versionierung von Datensaetzen und Konfigurationen", "type": "technical"},
        ],
    },
    "vertraulichkeit": {
        "label": "Vertraulichkeit",
        "sdm_baustein": "SDM-B51 (Zugriffe regeln)",
        "measures": [
            {"name": "Verschluesselung im Transit", "description": "TLS 1.3 fuer alle Verbindungen", "type": "technical"},
            {"name": "Verschluesselung at Rest", "description": "AES-256 fuer gespeicherte Daten", "type": "technical"},
            {"name": "Zugriffskonzept (RBAC)", "description": "Rollenbasiert, Least-Privilege-Prinzip, regelmaessige Reviews", "type": "technical"},
            {"name": "Multi-Faktor-Authentifizierung", "description": "MFA fuer alle administrativen Zugaenge", "type": "technical"},
            {"name": "Physische Zutrittskontrolle", "description": "Schluessel, Kartenleser, Besucherprotokoll", "type": "technical"},
            {"name": "Vertraulichkeitsverpflichtung", "description": "Schriftliche Verpflichtung aller Mitarbeitenden", "type": "organizational"},
        ],
    },
    "nichtverkettung": {
        "label": "Nichtverkettung",
        "sdm_baustein": "SDM-B50 (Trennen)",
        "measures": [
            {"name": "Mandantentrennung", "description": "Logische Datentrennung nach Mandanten/Zweck", "type": "technical"},
            {"name": "Pseudonymisierung", "description": "Wo fachlich moeglich, Einsatz von Pseudonymen", "type": "technical"},
            {"name": "Zweckbindungspruefung", "description": "Pruefung bei jeder neuen Datennutzung", "type": "organizational"},
        ],
    },
    "transparenz": {
        "label": "Transparenz",
        "sdm_baustein": "SDM-B42 (Dokumentieren), SDM-B43 (Protokollieren)",
        "measures": [
            {"name": "Verarbeitungsverzeichnis", "description": "Art. 30 DS-GVO konformes VVT", "type": "organizational"},
            {"name": "Audit-Logging", "description": "Vollstaendige Protokollierung aller Datenzugriffe", "type": "technical"},
            {"name": "Informationspflichten", "description": "Art. 13/14 DS-GVO Datenschutzerklaerung", "type": "organizational"},
            {"name": "Datenpannen-Prozess", "description": "Dokumentierter Meldeprozess Art. 33/34 DS-GVO", "type": "organizational"},
        ],
    },
    "intervenierbarkeit": {
        "label": "Intervenierbarkeit",
        "sdm_baustein": "SDM-B60 (Loeschen), SDM-B61 (Berichtigen), SDM-B62 (Einschraenken)",
        "measures": [
            {"name": "Betroffenenanfragen-Prozess", "description": "Auskunft, Loeschung, Berichtigung, Widerspruch", "type": "organizational"},
            {"name": "Technische Loeschfaehigkeit", "description": "Loeschung mit Nachweis (Loeschprotokoll)", "type": "technical"},
            {"name": "Datenportabilitaet", "description": "Export in maschinenlesbarem Format (Art. 20)", "type": "technical"},
            {"name": "Sperrfunktion", "description": "Einschraenkung der Verarbeitung moeglich", "type": "technical"},
        ],
    },
    "datenminimierung": {
        "label": "Datenminimierung",
        "sdm_baustein": "SDM-B41 (Planen und Spezifizieren)",
        "measures": [
            {"name": "Erforderlichkeitspruefung", "description": "Regelmaessige Pruefung der erhobenen Datenfelder", "type": "organizational"},
            {"name": "Automatisierte Loeschung", "description": "Fristgerechte Loeschung nach Aufbewahrungsfrist", "type": "technical"},
            {"name": "Anonymisierung", "description": "Anonymisierung/Aggregation fuer Statistik", "type": "technical"},
            {"name": "Privacy by Design", "description": "Datenschutz ab Entwurfsphase neuer Verarbeitungen", "type": "organizational"},
        ],
    },
}

# -- Sector-specific extensions ----------------------------------------------

SECTOR_TOMS = {
    "it_saas": {
        "label": "IT / SaaS",
        "measures": [
            {"name": "Container-Isolation", "description": "Workload-Isolation zwischen Mandanten (Kubernetes Namespaces)", "type": "technical", "sdm_goal": "nichtverkettung"},
            {"name": "API-Security", "description": "Rate Limiting, OAuth 2.0, API-Key-Rotation", "type": "technical", "sdm_goal": "vertraulichkeit"},
            {"name": "DevSecOps Pipeline", "description": "SAST/DAST in CI/CD, Dependency Scanning", "type": "technical", "sdm_goal": "integritaet"},
            {"name": "Secrets Management", "description": "Vault/KMS fuer Credentials, keine Hardcoded Secrets", "type": "technical", "sdm_goal": "vertraulichkeit"},
        ],
    },
    "gesundheitswesen": {
        "label": "Gesundheitswesen",
        "measures": [
            {"name": "Patientenakten-Verschluesselung", "description": "Ende-zu-Ende-Verschluesselung fuer Gesundheitsdaten (Art. 9)", "type": "technical", "sdm_goal": "vertraulichkeit"},
            {"name": "Notfallzugriff", "description": "Break-the-Glass-Verfahren fuer medizinische Notfaelle", "type": "organizational", "sdm_goal": "verfuegbarkeit"},
            {"name": "Forschungsdaten-Anonymisierung", "description": "Vollstaendige Anonymisierung vor Forschungsnutzung", "type": "technical", "sdm_goal": "datenminimierung"},
        ],
    },
    "finanzdienstleistungen": {
        "label": "Finanzdienstleistungen",
        "measures": [
            {"name": "Transaktions-Monitoring", "description": "Echtzeit-Ueberwachung auf Unregelmaessigkeiten (GwG)", "type": "technical", "sdm_goal": "integritaet"},
            {"name": "Aufbewahrungspflichten", "description": "10 Jahre Aufbewahrung gemaess AO/HGB, danach Loeschung", "type": "organizational", "sdm_goal": "datenminimierung"},
            {"name": "PCI-DSS Compliance", "description": "Payment Card Industry Standards fuer Kartendaten", "type": "technical", "sdm_goal": "vertraulichkeit"},
        ],
    },
    "handel": {
        "label": "Handel / E-Commerce",
        "measures": [
            {"name": "Cookie-Consent-Management", "description": "TDDDG-konformes Einwilligungsmanagement", "type": "technical", "sdm_goal": "transparenz"},
            {"name": "Gastzugang-Option", "description": "Bestellung ohne Pflicht-Kundenkonto (Datenminimierung)", "type": "organizational", "sdm_goal": "datenminimierung"},
            {"name": "Zahlungsdaten-Tokenisierung", "description": "Keine direkte Speicherung von Zahlungsdaten", "type": "technical", "sdm_goal": "vertraulichkeit"},
        ],
    },
    "handwerk": {
        "label": "Handwerk",
        "measures": [
            {"name": "Mobile-Device-Management", "description": "Absicherung mobiler Endgeraete auf Baustellen", "type": "technical", "sdm_goal": "vertraulichkeit"},
            {"name": "Papierakten-Sicherung", "description": "Verschlossene Schraenke fuer physische Kundenakten", "type": "technical", "sdm_goal": "vertraulichkeit"},
        ],
    },
}

# -- NIS2 / ISO 27001 / AI Act extensions -----------------------------------

NIS2_TOMS = [
    {"name": "Incident-Response-Plan", "description": "NIS2-konformer Vorfallreaktionsplan (72h Meldepflicht an BSI)", "type": "organizational", "sdm_goal": "verfuegbarkeit"},
    {"name": "Supply-Chain-Security", "description": "Bewertung der Lieferkettensicherheit (BSIG 2025)", "type": "organizational", "sdm_goal": "integritaet"},
    {"name": "Vulnerability Management", "description": "Regelmaessige Schwachstellenscans, Patch-Management", "type": "technical", "sdm_goal": "integritaet"},
    {"name": "Security Awareness", "description": "Pflicht-Schulungen Cybersicherheit fuer Geschaeftsleitung", "type": "organizational", "sdm_goal": "vertraulichkeit"},
]

ISO27001_TOMS = [
    {"name": "ISMS Risikomanagement", "description": "ISO 27001 Anhang A — Informationssicherheits-Risikobewertung", "type": "organizational", "sdm_goal": "verfuegbarkeit"},
    {"name": "Dokumentenlenkung", "description": "Versionierte Sicherheitsrichtlinien und -verfahren", "type": "organizational", "sdm_goal": "transparenz"},
{"name": "Management Review", "description": "Jaehrliche Ueberprufung des ISMS durch Geschaeftsleitung", "type": "organizational", "sdm_goal": "transparenz"},
|
||||
]

AI_ACT_TOMS = [
    {"name": "KI-Risikoklassifizierung", "description": "Bewertung aller KI-Systeme nach EU AI Act Risikokategorien", "type": "organizational", "sdm_goal": "transparenz"},
    {"name": "Human Oversight", "description": "Menschliche Aufsicht fuer Hochrisiko-KI-Systeme (Art. 14 KI-VO)", "type": "organizational", "sdm_goal": "intervenierbarkeit"},
    {"name": "KI-Transparenz", "description": "Transparenzpflichten bei KI-Einsatz gegenueber Betroffenen (Art. 13 KI-VO)", "type": "organizational", "sdm_goal": "transparenz"},
    {"name": "KI-Bias-Monitoring", "description": "Ueberwachung auf diskriminierende Ergebnisse", "type": "technical", "sdm_goal": "integritaet"},
]


def generate_tom_drafts(ctx: dict) -> list[dict]:
    """Generate TOM measure drafts structured by SDM Gewaehrleistungsziele.

    Args:
        ctx: Flat dict from company-profile/template-context.

    Returns:
        List of TOM measure dicts with SDM goal assignment.
    """
    measures = []
    control_counter = 0

    # Base SDM measures
    for goal_key, goal_data in SDM_TOM_CATALOG.items():
        for m in goal_data["measures"]:
            control_counter += 1
            measures.append(_build_measure(
                counter=control_counter,
                measure=m,
                sdm_goal=goal_key,
                sdm_baustein=goal_data["sdm_baustein"],
                category=goal_data["label"],
                ctx=ctx,
            ))

    # Regulatory extensions
    if ctx.get("subject_to_nis2"):
        for m in NIS2_TOMS:
            control_counter += 1
            measures.append(_build_measure(
                counter=control_counter,
                measure=m,
                sdm_goal=m["sdm_goal"],
                sdm_baustein="NIS2 / BSIG 2025",
                category="Cybersicherheit (NIS2)",
                ctx=ctx,
            ))

    if ctx.get("subject_to_iso27001"):
        for m in ISO27001_TOMS:
            control_counter += 1
            measures.append(_build_measure(
                counter=control_counter,
                measure=m,
                sdm_goal=m["sdm_goal"],
                sdm_baustein="ISO 27001 Anhang A",
                category="ISMS (ISO 27001)",
                ctx=ctx,
            ))

    if ctx.get("subject_to_ai_act") or ctx.get("has_ai_systems"):
        for m in AI_ACT_TOMS:
            control_counter += 1
            measures.append(_build_measure(
                counter=control_counter,
                measure=m,
                sdm_goal=m["sdm_goal"],
                sdm_baustein="EU AI Act (2024/1689)",
                category="KI-Compliance",
                ctx=ctx,
            ))

    # Sector-specific extensions
    sector = _detect_sector(ctx)
    if sector and sector in SECTOR_TOMS:
        sector_data = SECTOR_TOMS[sector]
        for m in sector_data["measures"]:
            control_counter += 1
            measures.append(_build_measure(
                counter=control_counter,
                measure=m,
                sdm_goal=m.get("sdm_goal", "vertraulichkeit"),
                sdm_baustein=f"Sektor: {sector_data['label']}",
                category=f"Sektor ({sector_data['label']})",
                ctx=ctx,
            ))

    return measures


def sdm_coverage_summary(measures: list[dict]) -> dict:
    """Return coverage matrix: SDM goal -> measure count."""
    summary = {}
    for goal_key in SDM_TOM_CATALOG:
        count = sum(1 for m in measures if m.get("sdm_goal") == goal_key)
        summary[goal_key] = {
            "label": SDM_TOM_CATALOG[goal_key]["label"],
            "count": count,
        }
    return summary
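
# Usage sketch (illustrative; counts derived from the catalogs above):
#
#     measures = generate_tom_drafts({"has_ai_systems": True})
#     summary = sdm_coverage_summary(measures)
#     summary["transparenz"]["count"]
#     # -> 6 (4 base measures + "KI-Risikoklassifizierung" + "KI-Transparenz")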


# -- Internal helpers --------------------------------------------------------

def _build_measure(counter: int, measure: dict, sdm_goal: str,
                   sdm_baustein: str, category: str, ctx: dict) -> dict:
    return {
        "control_id": f"TOM-SDM-{counter:03d}",
        "name": measure["name"],
        "description": measure["description"],
        "category": category,
        "type": measure.get("type", "organizational"),
        "sdm_goal": sdm_goal,
        "sdm_baustein_ref": sdm_baustein,
        "implementation_status": "not_implemented",
        "effectiveness_rating": "not_assessed",
        "responsible_department": "IT-Sicherheit",
        "priority": _assess_priority(measure, ctx),
        "review_frequency": f"{ctx.get('review_cycle_months', 12)} Monate",
    }
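
# Formatting example: counter 7 yields control_id "TOM-SDM-007", since
# {counter:03d} zero-pads to three digits.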


def _assess_priority(measure: dict, ctx: dict) -> str:
    name_lower = measure.get("name", "").lower()
    # "multi-faktor" added so "Multi-Faktor-Authentifizierung" is ranked high;
    # the bare keyword "mfa" only appears in that measure's description.
    if any(kw in name_lower for kw in ["verschluesselung", "mfa", "multi-faktor", "incident", "ki-risiko"]):
        return "high"
    if any(kw in name_lower for kw in ["backup", "zugriff", "logging", "loeschung"]):
        return "high"
    return "medium"


def _detect_sector(ctx: dict) -> str | None:
    """Map company industry to sector key."""
    industry = (ctx.get("industry") or "").lower()
    mapping = {
        "technologie": "it_saas", "it": "it_saas", "saas": "it_saas", "software": "it_saas",
        "gesundheit": "gesundheitswesen", "pharma": "gesundheitswesen", "medizin": "gesundheitswesen",
        "finanz": "finanzdienstleistungen", "bank": "finanzdienstleistungen", "versicherung": "finanzdienstleistungen",
        "handel": "handel", "e-commerce": "handel", "einzelhandel": "handel", "shop": "handel",
        "handwerk": "handwerk", "bau": "handwerk", "kfz": "handwerk",
    }
    # Check longer keywords first: the bare substring "it" would otherwise
    # shadow e.g. "gesundheit" ("gesundheitswesen" contains "it").
    for keyword, sector in sorted(mapping.items(), key=lambda kv: -len(kv[0])):
        if keyword in industry:
            return sector
    return None
393
document-templates/generators/vvt_template.py
Normal file
@@ -0,0 +1,393 @@
"""VVT template generator V2 — sector-specific VVT activity drafts.

Generates Art. 30 DS-GVO compliant VVT entries with sector-specific
standard processing activities inspired by BayLDA patterns.
"""

from typing import Optional

# -- Sector activity catalogs ------------------------------------------------

SECTOR_ACTIVITIES = {
    "it_saas": [
        {
            "name": "SaaS-Plattformbetrieb",
            "purposes": ["Bereitstellung und Betrieb der SaaS-Plattform"],
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfullung)"],
|
||||
"data_subject_categories": ["Kunden", "Endnutzer"],
|
||||
"personal_data_categories": ["Stammdaten", "Nutzungsdaten", "Inhaltsdaten", "Logdaten"],
|
||||
"recipient_categories": ["Hosting-Anbieter (AVV)", "Support-Dienstleister (AVV)"],
|
||||
"retention_period": "90 Tage nach Vertragsende + gesetzl. Aufbewahrung",
|
||||
"tom_description": "Mandantentrennung, Verschluesselung, RBAC",
|
||||
"dpia_required": True,
|
||||
},
|
||||
{
|
||||
"name": "Kundenverwaltung / CRM",
|
||||
"purposes": ["Verwaltung von Kundenbeziehungen, Vertragsmanagement"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Kunden", "Ansprechpartner", "Interessenten"],
|
||||
"personal_data_categories": ["Kontaktdaten", "Vertragsdaten", "Kommunikationshistorie"],
|
||||
"recipient_categories": ["CRM-Anbieter (AVV)"],
|
||||
"retention_period": "3 Jahre nach letztem Kontakt, 10 Jahre Rechnungsdaten",
|
||||
"tom_description": "Zugriffsbeschraenkung Vertrieb/Support, Protokollierung",
|
||||
},
|
||||
{
|
||||
"name": "E-Mail-Marketing / Newsletter",
|
||||
"purposes": ["Versand von Produkt-Updates und Marketing-Newsletter"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. a DS-GVO (Einwilligung)", "UWG §7"],
|
||||
"data_subject_categories": ["Newsletter-Abonnenten"],
|
||||
"personal_data_categories": ["E-Mail-Adresse", "Name", "Oeffnungs-/Klickverhalten"],
|
||||
"recipient_categories": ["E-Mail-Dienstleister (AVV)"],
|
||||
"retention_period": "Unverzueglich nach Widerruf",
|
||||
"tom_description": "Double-Opt-In, einfache Abmeldefunktion",
|
||||
},
|
||||
{
|
||||
"name": "Webanalyse",
|
||||
"purposes": ["Analyse der Website-Nutzung zur Verbesserung"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. a DS-GVO (Einwilligung via Cookie-Banner)"],
|
||||
"data_subject_categories": ["Website-Besucher"],
|
||||
"personal_data_categories": ["IP-Adresse (anonymisiert)", "Seitenaufrufe", "Geraeteinformationen"],
|
||||
"recipient_categories": ["Analyse-Anbieter (AVV)"],
|
||||
"retention_period": "14 Monate",
|
||||
"tom_description": "IP-Anonymisierung, Cookie-Consent (TDDDG §25)",
|
||||
},
|
||||
{
|
||||
"name": "Bewerbermanagement",
|
||||
"purposes": ["Bearbeitung von Bewerbungen"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO i.V.m. §26 BDSG"],
|
||||
"data_subject_categories": ["Bewerber"],
|
||||
"personal_data_categories": ["Kontaktdaten", "Lebenslauf", "Qualifikationen"],
|
||||
"recipient_categories": ["Fachabteilung"],
|
||||
"retention_period": "6 Monate nach Verfahrensabschluss (AGG)",
|
||||
"tom_description": "Zugriffsschutz Bewerbungsportal, verschluesselte Uebertragung",
|
||||
},
|
||||
{
|
||||
"name": "Mitarbeiterverwaltung / HR",
|
||||
"purposes": ["Personalverwaltung, Lohnabrechnung, Arbeitszeiterfassung"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b/c DS-GVO i.V.m. §26 BDSG"],
|
||||
"data_subject_categories": ["Beschaeftigte"],
|
||||
"personal_data_categories": ["Stammdaten", "Vertragsdaten", "Bankverbindung", "Arbeitszeiten"],
|
||||
"recipient_categories": ["Lohnbuero (AVV)", "Finanzamt", "Sozialversicherungstraeger"],
|
||||
"retention_period": "10 Jahre nach Austritt",
|
||||
"tom_description": "Besonderer Zugriffsschutz (nur HR), verschluesselte Speicherung",
|
||||
},
|
||||
{
|
||||
"name": "Support-Ticketing",
|
||||
"purposes": ["Bearbeitung von Kundenanfragen und Stoerungsmeldungen"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Kunden", "Endnutzer"],
|
||||
"personal_data_categories": ["Kontaktdaten", "Ticket-Inhalt", "Systemlogs"],
|
||||
"recipient_categories": ["Support-Tool-Anbieter (AVV)"],
|
||||
"retention_period": "2 Jahre nach Ticket-Schliessung",
|
||||
"tom_description": "Rollenbasierter Zugriff, Pseudonymisierung in Reports",
|
||||
},
|
||||
{
|
||||
"name": "Logging und Monitoring",
|
||||
"purposes": ["Sicherheitsueberwachung, Fehleranalyse"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. f DS-GVO (berechtigtes Interesse: IT-Sicherheit)"],
|
||||
"data_subject_categories": ["Plattform-Nutzer", "Administratoren"],
|
||||
"personal_data_categories": ["IP-Adressen", "Zugriffszeitpunkte", "Fehlerprotokolle"],
|
||||
"recipient_categories": ["Log-Management-Anbieter (AVV)"],
|
||||
"retention_period": "30 Tage Anwendungslogs, 90 Tage Sicherheitslogs",
|
||||
"tom_description": "Zugriffsschutz Logdaten, automatische Rotation",
|
||||
},
|
||||
],
|
||||
"gesundheitswesen": [
|
||||
{
|
||||
"name": "Patientenverwaltung",
|
||||
"purposes": ["Patientenakte, Behandlungsdokumentation"],
|
||||
"legal_bases": ["Art. 9 Abs. 2 lit. h DS-GVO i.V.m. §630f BGB"],
|
||||
"data_subject_categories": ["Patienten"],
|
||||
"personal_data_categories": ["Stammdaten", "Versicherung", "Diagnosen", "Befunde (Art. 9)"],
|
||||
"recipient_categories": ["PVS-Anbieter (AVV)", "Labor (AVV)", "ueberweisende Aerzte"],
|
||||
"retention_period": "10 Jahre nach letzter Behandlung (§630f BGB)",
|
||||
"tom_description": "Verschluesselung Patientenakte, Notfallzugriff",
|
||||
"dpia_required": True,
|
||||
},
|
||||
{
|
||||
"name": "Abrechnung (KV/PKV)",
|
||||
"purposes": ["Abrechnung aerztlicher Leistungen"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. c DS-GVO", "Art. 9 Abs. 2 lit. h"],
|
||||
"data_subject_categories": ["Patienten"],
|
||||
"personal_data_categories": ["Stammdaten", "Versicherung", "Diagnosen (ICD)", "Leistungsziffern"],
|
||||
"recipient_categories": ["KV", "PKV", "Abrechnungsstelle (AVV)"],
|
||||
"retention_period": "10 Jahre (AO)",
|
||||
"tom_description": "Verschluesselte Uebermittlung (KV-Connect/KIM)",
|
||||
},
|
||||
],
|
||||
"handel": [
|
||||
{
|
||||
"name": "Bestellabwicklung",
|
||||
"purposes": ["Bestellannahme, Versand, Rechnungsstellung"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Kunden (Besteller)"],
|
||||
"personal_data_categories": ["Kontaktdaten", "Lieferadresse", "Bestelldaten", "Rechnungsdaten"],
|
||||
"recipient_categories": ["Versanddienstleister", "Zahlungsanbieter (AVV)"],
|
||||
"retention_period": "10 Jahre Rechnungen, 3 Jahre Bestelldaten",
|
||||
"tom_description": "Verschluesselte Uebertragung, Zugriffsschutz",
|
||||
},
|
||||
{
|
||||
"name": "Kundenkonto",
|
||||
"purposes": ["Bereitstellung Kundenkonto (optional)"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. a/b DS-GVO"],
|
||||
"data_subject_categories": ["Registrierte Kunden"],
|
||||
"personal_data_categories": ["Stammdaten", "Passwort (gehasht)", "Bestellhistorie"],
|
||||
"recipient_categories": ["Shop-Plattform (AVV)"],
|
||||
"retention_period": "Sofort nach Kontoloesch-Anfrage, Rechnungen 10 Jahre",
|
||||
"tom_description": "MFA-Option, bcrypt Passwortspeicherung, Gastzugang-Alternative",
|
||||
},
|
||||
{
|
||||
"name": "Zahlungsabwicklung",
|
||||
"purposes": ["Abwicklung von Zahlungsvorgaengen"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Zahlende Kunden"],
|
||||
"personal_data_categories": ["Zahlungsart", "Transaktionsdaten"],
|
||||
"recipient_categories": ["Payment-Service-Provider"],
|
||||
"retention_period": "10 Jahre (AO)",
|
||||
"tom_description": "PCI-DSS, Tokenisierung, keine direkte Kartenspeicherung",
|
||||
},
|
||||
],
|
||||
"handwerk": [
|
||||
{
|
||||
"name": "Kundenauftraege und Angebotserstellung",
|
||||
"purposes": ["Angebotserstellung, Auftragsabwicklung, Rechnungsstellung"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Kunden (Privat/Gewerbe)"],
|
||||
"personal_data_categories": ["Kontaktdaten", "Objektadresse", "Auftrag", "Rechnungsdaten"],
|
||||
"recipient_categories": ["Steuerberater", "ggf. Subunternehmer"],
|
||||
"retention_period": "10 Jahre Rechnungen, 5 Jahre Gewaehrleistung",
|
||||
"tom_description": "Zugriffskontrolle Auftragssystem",
|
||||
},
|
||||
{
|
||||
"name": "Baustellendokumentation",
|
||||
"purposes": ["Dokumentation Baufortschritt, Maengelprotokoll"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b/f DS-GVO"],
|
||||
"data_subject_categories": ["Kunden", "Mitarbeitende"],
|
||||
"personal_data_categories": ["Fotos", "Protokolle", "Abnahmedokumente"],
|
||||
"recipient_categories": ["Auftraggeber", "Architekten"],
|
||||
"retention_period": "5 Jahre nach Abnahme",
|
||||
"tom_description": "Projektordner mit Zugriffsbeschraenkung",
|
||||
},
|
||||
],
|
||||
"bildung": [
|
||||
{
|
||||
"name": "Schueler-/Studierendenverwaltung",
|
||||
"purposes": ["Verwaltung von Schueler-/Studierendendaten"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. c/e DS-GVO i.V.m. Schulgesetz"],
|
||||
"data_subject_categories": ["Schueler/Studierende (ggf. Minderjaehrige)", "Erziehungsberechtigte"],
|
||||
"personal_data_categories": ["Stammdaten", "Kontaktdaten Erziehungsberechtigte"],
|
||||
"recipient_categories": ["Schulverwaltungssoftware (AVV)", "Schulbehoerde"],
|
||||
"retention_period": "Gemaess Schulgesetz (i.d.R. 5 Jahre nach Abgang)",
|
||||
"tom_description": "Besonderer Zugriffsschutz, Einwilligung Erziehungsberechtigte",
|
||||
"dpia_required": True,
|
||||
},
|
||||
{
|
||||
"name": "Notenverarbeitung",
|
||||
"purposes": ["Leistungsbewertung, Zeugniserstellung"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. c/e DS-GVO i.V.m. Schulgesetz"],
|
||||
"data_subject_categories": ["Schueler/Studierende"],
|
||||
"personal_data_categories": ["Noten", "Leistungsbewertungen", "Pruefungsergebnisse"],
|
||||
"recipient_categories": ["Lehrkraefte", "Schulleitung"],
|
||||
"retention_period": "Zeugniskopien 50 Jahre, Einzelnoten 2 Jahre",
|
||||
"tom_description": "Zugriffsbeschraenkung auf Fachlehrkraft, verschluesselt",
|
||||
},
|
||||
],
|
||||
"beratung": [
|
||||
{
|
||||
"name": "Mandantenverwaltung",
|
||||
"purposes": ["Verwaltung von Mandantenbeziehungen"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Mandanten", "Ansprechpartner"],
|
||||
"personal_data_categories": ["Kontaktdaten", "Vertragsdaten", "Korrespondenz"],
|
||||
"recipient_categories": ["Kanzleisoftware (AVV)", "Steuerberater"],
|
||||
"retention_period": "10 Jahre Rechnungen, 5 Jahre Handakten",
|
||||
"tom_description": "Mandantengeheimnis, Need-to-know-Prinzip",
|
||||
},
|
||||
{
|
||||
"name": "Projektmanagement",
|
||||
"purposes": ["Planung und Steuerung von Beratungsprojekten"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b/f DS-GVO"],
|
||||
"data_subject_categories": ["Projektbeteiligte"],
|
||||
"personal_data_categories": ["Projektdaten", "Aufgaben", "Zeiterfassung"],
|
||||
"recipient_categories": ["PM-Tool (AVV)", "Mandant"],
|
||||
"retention_period": "2 Jahre nach Projektabschluss",
|
||||
"tom_description": "Projektspezifische Zugriffsrechte, Mandantentrennung",
|
||||
},
|
||||
{
|
||||
"name": "Zeiterfassung und Abrechnung",
|
||||
"purposes": ["Stundenerfassung, Abrechnung gegenueber Mandanten"],
|
||||
"legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO"],
|
||||
"data_subject_categories": ["Berater/Mitarbeitende", "Mandanten"],
|
||||
"personal_data_categories": ["Arbeitszeiten", "Taetigkeitsbeschreibungen", "Stundensaetze"],
|
||||
"recipient_categories": ["Abrechnungssystem (AVV)", "Buchhaltung"],
|
||||
"retention_period": "10 Jahre (AO)",
|
||||
"tom_description": "Zugriff nur eigene Zeiten + Projektleitung",
|
||||
},
|
||||
],
|
||||
}

# Industry -> Sector mapping
INDUSTRY_SECTOR_MAP = {
    "technologie": "it_saas", "it": "it_saas", "saas": "it_saas", "software": "it_saas",
    "it dienstleistungen": "it_saas",
    "gesundheit": "gesundheitswesen", "pharma": "gesundheitswesen",
    "e-commerce": "handel", "handel": "handel", "einzelhandel": "handel",
    "handwerk": "handwerk", "bau": "handwerk", "kfz": "handwerk",
    "bildung": "bildung", "schule": "bildung", "hochschule": "bildung",
    "beratung": "beratung", "consulting": "beratung", "kanzlei": "beratung",
    "recht": "beratung",
}


def generate_vvt_drafts(ctx: dict) -> list[dict]:
    """Generate VVT activity drafts, sector-specific if possible.

    Args:
        ctx: Flat dict from company-profile/template-context.

    Returns:
        List of VVT activity dicts ready for creation.
    """
    company = ctx.get("company_name", "Unbekannt")
    dpo = ctx.get("dpo_name", "")
    sector = _detect_sector(ctx)

    # Use sector-specific activities if available, else generate from systems
    if sector and sector in SECTOR_ACTIVITIES:
        activities = _generate_sector_vvt(ctx, sector, company, dpo)
    else:
        activities = _generate_system_vvt(ctx, company, dpo)

    # Add the standard HR activity when drafts exist but none of them covers HR
    has_hr = any("mitarbeiter" in a.get("name", "").lower() or "hr" in a.get("name", "").lower()
                 for a in activities)
    if not has_hr and len(activities) > 0:
        activities.append(_build_hr_activity(len(activities) + 1, company, dpo))

    return activities


def _detect_sector(ctx: dict) -> Optional[str]:
    industry = (ctx.get("industry") or "").lower().strip()
    # Match keywords only at word starts (see the TOM generator's
    # _detect_sector): a bare substring test would let "it" match inside
    # words such as "gesundheitswesen".
    padded = f" {industry}"
    for keyword, sector in INDUSTRY_SECTOR_MAP.items():
        if f" {keyword}" in padded:
            return sector
    return None


def _generate_sector_vvt(ctx: dict, sector: str, company: str, dpo: str) -> list[dict]:
    activities = []
    sector_data = SECTOR_ACTIVITIES[sector]

    for i, template in enumerate(sector_data, 1):
        activity = {
            "vvt_id": f"VVT-{sector.upper()[:3]}-{i:03d}",
            "name": template["name"],
            "description": f"Automatisch generierter VVT-Eintrag: {template['name']}",
            "purposes": template["purposes"],
            "legal_bases": template["legal_bases"],
            "data_subject_categories": template["data_subject_categories"],
            "personal_data_categories": template["personal_data_categories"],
            "recipient_categories": template["recipient_categories"],
            "third_country_transfers": _assess_third_country(ctx),
            "retention_period": {"default": template["retention_period"]},
            "tom_description": template["tom_description"],
            "business_function": _infer_business_function(template["name"]),
            "systems": [],
            "protection_level": "HIGH" if template.get("dpia_required") else "MEDIUM",
            "dpia_required": template.get("dpia_required", False),
            "status": "DRAFT",
            "responsible": dpo or company,
            "source_sector": sector,
        }
        activities.append(activity)

    return activities


def _generate_system_vvt(ctx: dict, company: str, dpo: str) -> list[dict]:
    """Fallback: generate VVT per processing system (original approach)."""
    systems = ctx.get("processing_systems", [])
    activities = []

    for i, system in enumerate(systems, 1):
        name = system.get("name", f"System {i}")
        vendor = system.get("vendor", "")
        hosting = system.get("hosting", "on-premise")
        categories = system.get("personal_data_categories", [])

        activity = {
            "vvt_id": f"VVT-SYS-{i:03d}",
            "name": f"Verarbeitung in {name}",
            "description": f"VVT-Eintrag fuer System '{name}'"
                           + (f" (Anbieter: {vendor})" if vendor else ""),
            "purposes": [f"Datenverarbeitung via {name}"],
            "legal_bases": ["Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfuellung)"],
            "data_subject_categories": [],
            "personal_data_categories": categories,
            "recipient_categories": [vendor] if vendor else [],
            "third_country_transfers": _assess_third_country_hosting(hosting),
            "retention_period": {"default": "Gemaess Loeschfristenkatalog"},
            "tom_description": f"Siehe TOM-Katalog fuer {name}",
            "business_function": "IT",
            "systems": [name],
            "deployment_model": hosting,
            "protection_level": "HIGH" if len(categories) > 3 else "MEDIUM",
            "dpia_required": len(categories) > 3,
            "status": "DRAFT",
            "responsible": dpo or company,
        }
        activities.append(activity)

    return activities


def _build_hr_activity(index: int, company: str, dpo: str) -> dict:
    return {
        "vvt_id": f"VVT-STD-{index:03d}",
        "name": "Mitarbeiterverwaltung / HR",
        "description": "Standard-Verarbeitungstaetigkeit Personalverwaltung",
        "purposes": ["Personalverwaltung, Lohnabrechnung, Arbeitszeiterfassung"],
        "legal_bases": ["Art. 6 Abs. 1 lit. b/c DS-GVO i.V.m. §26 BDSG"],
        "data_subject_categories": ["Beschaeftigte"],
        "personal_data_categories": ["Stammdaten", "Vertragsdaten", "Bankverbindung", "Arbeitszeiten"],
        "recipient_categories": ["Lohnbuero (AVV)", "Finanzamt", "Sozialversicherungstraeger"],
        "third_country_transfers": [],
        "retention_period": {"default": "10 Jahre nach Austritt"},
        "tom_description": "Besonderer Zugriffsschutz (nur HR), verschluesselte Speicherung",
        "business_function": "HR",
        "systems": [],
        "protection_level": "HIGH",
        "dpia_required": False,
        "status": "DRAFT",
        "responsible": dpo or company,
    }


def _assess_third_country(ctx: dict) -> list:
    if ctx.get("third_country_transfer"):
        return [{"country": "Abhaengig von Dienstleister", "mechanism": "Pruefung erforderlich"}]
    return []


def _assess_third_country_hosting(hosting: str) -> list:
    if hosting in ("us-cloud", "international"):
        return [{"country": "USA", "mechanism": "EU-US Data Privacy Framework"}]
    return []


def _infer_business_function(name: str) -> str:
    name_lower = name.lower()
    if any(kw in name_lower for kw in ["mitarbeiter", "hr", "personal", "bewerbung"]):
        return "HR"
    if any(kw in name_lower for kw in ["abrechnung", "rechnung", "zahlung", "buchhaltung"]):
        return "Finanzen"
    if any(kw in name_lower for kw in ["marketing", "newsletter", "webanalyse", "crm", "akquise"]):
        return "Marketing/Vertrieb"
    if any(kw in name_lower for kw in ["support", "ticket", "kundenservice"]):
        return "Support"
    if any(kw in name_lower for kw in ["patient", "befund", "labor", "termin"]):
        return "Medizin"
    if any(kw in name_lower for kw in ["schueler", "noten", "lernplattform"]):
        return "Paedagogik"
    return "IT"
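A usage sketch for `generate_vvt_drafts`: the context keys mirror the ones the code reads (`company_name`, `dpo_name`, `industry`, `third_country_transfer`, `processing_systems`); the values are made up.

```python
# Illustrative call; in production the ctx dict comes from the
# company-profile/template-context service.
ctx = {
    "company_name": "Example SaaS GmbH",
    "dpo_name": "Max Mustermann",
    "industry": "SaaS",
    "third_country_transfer": True,
}

drafts = generate_vvt_drafts(ctx)

# "SaaS" resolves to the it_saas sector, so all eight catalog activities
# are emitted. The catalog already contains "Mitarbeiterverwaltung / HR",
# so no extra HR draft is appended.
assert len(drafts) == 8
assert all(d["status"] == "DRAFT" for d in drafts)
assert drafts[0]["third_country_transfers"]  # non-empty because of the flag

# With an unknown industry the code falls back to _generate_system_vvt
# over ctx["processing_systems"]; if that list is empty, no drafts (and
# therefore no HR entry) are produced.
```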
405
document-templates/migrations/001_dsfa_template_v2.sql
Normal file
@@ -0,0 +1,405 @@
-- Migration 001: DSFA Template V2 — Datenschutz-Folgenabschaetzung
-- Archiviert V1 (aus Migration 025) und fuegt erweiterte V2 ein.
-- Zielrepo: breakpilot-compliance (spaetere Integration)

-- 1. Bestehende V1 archivieren
UPDATE compliance.compliance_legal_templates
SET status = 'archived', updated_at = NOW()
WHERE document_type = 'dsfa'
  AND status = 'published';

-- 2. DSFA V2 einfuegen
INSERT INTO compliance.compliance_legal_templates (
    tenant_id, document_type, title, description, language, jurisdiction,
    version, status, license_name, source_name, attribution_required,
    is_complete_document, placeholders, content
) VALUES (
    '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
    'dsfa',
    'Datenschutz-Folgenabschaetzung (DSFA) gemaess Art. 35 DSGVO — V2',
    'Erweiterte Vorlage fuer eine Datenschutz-Folgenabschaetzung mit Schwellwertanalyse (WP248), SDM-basierter TOM-Struktur, strukturierter Risikobewertung nach ISO 29134 und KI-Modul (EU AI Act). Geeignet fuer alle Verarbeitungen, die einer DSFA beduerfen.',
    'de',
    'EU/DSGVO',
    '2.0',
    'published',
    'MIT',
    'BreakPilot Compliance',
    false,
    true,
    CAST('[
        "{{ORGANISATION_NAME}}",
        "{{ORGANISATION_ADRESSE}}",
        "{{DSB_NAME}}",
        "{{DSB_KONTAKT}}",
        "{{BUNDESLAND}}",
        "{{AUFSICHTSBEHOERDE}}",
        "{{ERSTELLT_VON}}",
        "{{ERSTELLT_AM}}",
        "{{GENEHMIGT_VON}}",
        "{{GENEHMIGT_AM}}",
        "{{WP248_K1_BEWERTUNG_SCORING}}",
        "{{WP248_K2_AUTOMATISIERTE_ENTSCHEIDUNG}}",
        "{{WP248_K3_SYSTEMATISCHE_UEBERWACHUNG}}",
        "{{WP248_K4_SENSIBLE_DATEN}}",
        "{{WP248_K5_GROSSER_UMFANG}}",
        "{{WP248_K6_DATENVERKNUEPFUNG}}",
        "{{WP248_K7_SCHUTZBEDUERFTIGE_BETROFFENE}}",
        "{{WP248_K8_INNOVATIVE_TECHNOLOGIE}}",
        "{{WP248_K9_RECHTSAUSUEBUNG_HINDERT}}",
        "{{SCHWELLWERT_ERGEBNIS}}",
        "{{MUSS_LISTEN_REFERENZ}}",
        "{{VERARBEITUNG_TITEL}}",
        "{{VERARBEITUNG_BESCHREIBUNG}}",
        "{{VERARBEITUNG_UMFANG}}",
        "{{VERARBEITUNG_KONTEXT}}",
        "{{VERARBEITUNGSMITTEL}}",
        "{{ZWECK_VERARBEITUNG}}",
        "{{RECHTSGRUNDLAGE}}",
        "{{RECHTSGRUNDLAGE_DETAILS}}",
        "{{DATENKATEGORIEN}}",
        "{{BETROFFENENGRUPPEN}}",
        "{{EMPFAENGER}}",
        "{{DRITTLANDTRANSFER}}",
        "{{SPEICHERDAUER}}",
        "{{GEMEINSAME_VERANTWORTUNG_DETAILS}}",
        "{{AUFTRAGSVERARBEITER_DETAILS}}",
        "{{NOTWENDIGKEIT_BEWERTUNG}}",
        "{{VERHAELTNISMAESSIGKEIT_BEWERTUNG}}",
        "{{DATENMINIMIERUNG_NACHWEIS}}",
        "{{ALTERNATIVEN_GEPRUEFT}}",
        "{{SPEICHERBEGRENZUNG_NACHWEIS}}",
        "{{RISIKO_METHODIK}}",
        "{{RISIKEN_TABELLE}}",
        "{{GESAMT_RISIKO_NIVEAU}}",
        "{{KONSULTATION_BETROFFENE}}",
        "{{KONSULTATION_BETRIEBSRAT}}",
        "{{TOM_VERFUEGBARKEIT}}",
        "{{TOM_INTEGRITAET}}",
        "{{TOM_VERTRAULICHKEIT}}",
        "{{TOM_NICHTVERKETTUNG}}",
        "{{TOM_TRANSPARENZ}}",
        "{{TOM_INTERVENIERBARKEIT}}",
        "{{TOM_DATENMINIMIERUNG}}",
        "{{DSB_STELLUNGNAHME}}",
        "{{DSB_DATUM}}",
        "{{ART36_BEGRUENDUNG}}",
        "{{DSFA_ERGEBNIS}}",
        "{{RESTRISIKO_BEWERTUNG}}",
        "{{UEBERPRUFUNGSINTERVALL}}",
        "{{NAECHSTE_UEBERPRUFUNG}}",
        "{{AENDERUNGSTRIGGER}}",
        "{{KI_SYSTEME_DETAILS}}",
        "{{KI_GRUNDRECHTSPRUEFUNG}}"
    ]' AS jsonb),
    $template$# Datenschutz-Folgenabschaetzung (DSFA)
**gemaess Art. 35 DS-GVO**

---

## 0. Schwellwertanalyse

Vor Durchfuehrung einer vollstaendigen DSFA ist zu pruefen, ob die geplante Verarbeitung eine solche erfordert. Die Pruefung erfolgt anhand der neun Kriterien der WP29/EDPB-Leitlinien (WP 248 rev.01) sowie der Muss-Liste der zustaendigen Aufsichtsbehoerde.

### 0.1 WP248-Kriterien (Art. 29-Datenschutzgruppe)

Sobald mindestens **zwei** der folgenden Kriterien zutreffen, ist eine DSFA in der Regel erforderlich.

| Nr. | Kriterium | Zutreffend? | Begruendung |
|-----|-----------|-------------|-------------|
| K1 | Bewertung oder Scoring (einschl. Profiling und Prognose) | {{WP248_K1_BEWERTUNG_SCORING}} | |
| K2 | Automatisierte Entscheidungsfindung mit Rechtswirkung oder aehnlich erheblicher Wirkung | {{WP248_K2_AUTOMATISIERTE_ENTSCHEIDUNG}} | |
| K3 | Systematische Ueberwachung von Personen | {{WP248_K3_SYSTEMATISCHE_UEBERWACHUNG}} | |
| K4 | Verarbeitung sensibler Daten oder hoechst persoenlicher Daten (Art. 9, 10 DS-GVO) | {{WP248_K4_SENSIBLE_DATEN}} | |
| K5 | Datenverarbeitung in grossem Umfang | {{WP248_K5_GROSSER_UMFANG}} | |
| K6 | Verknuepfung oder Zusammenfuehrung von Datenbestaenden | {{WP248_K6_DATENVERKNUEPFUNG}} | |
| K7 | Daten zu schutzbeduerftigen Betroffenen (Kinder, Beschaeftigte, Patienten) | {{WP248_K7_SCHUTZBEDUERFTIGE_BETROFFENE}} | |
| K8 | Innovative Nutzung oder Anwendung neuer technologischer Loesungen | {{WP248_K8_INNOVATIVE_TECHNOLOGIE}} | |
| K9 | Verarbeitung, die Betroffene an der Ausuebung eines Rechts oder der Nutzung einer Dienstleistung hindert | {{WP248_K9_RECHTSAUSUEBUNG_HINDERT}} | |

### 0.2 Muss-Liste der Aufsichtsbehoerde

**Bundesland:** {{BUNDESLAND}}
**Zustaendige Aufsichtsbehoerde:** {{AUFSICHTSBEHOERDE}}
**Referenz:** {{MUSS_LISTEN_REFERENZ}}

### 0.3 Ergebnis der Schwellwertanalyse

{{SCHWELLWERT_ERGEBNIS}}

---

## 1. Allgemeine Informationen und Verarbeitungsbeschreibung

| Feld | Inhalt |
|------|--------|
| **Organisation** | {{ORGANISATION_NAME}} |
| **Adresse** | {{ORGANISATION_ADRESSE}} |
| **Datenschutzbeauftragter** | {{DSB_NAME}} |
| **DSB-Kontakt** | {{DSB_KONTAKT}} |
| **Erstellt von** | {{ERSTELLT_VON}} |
| **Erstellt am** | {{ERSTELLT_AM}} |
{{#IF GENEHMIGT_VON}}| **Genehmigt von** | {{GENEHMIGT_VON}} |
| **Genehmigt am** | {{GENEHMIGT_AM}} |
{{/IF}}

### 1.1 Bezeichnung der Verarbeitungstaetigkeit

**{{VERARBEITUNG_TITEL}}**

### 1.2 Beschreibung der Verarbeitung

{{VERARBEITUNG_BESCHREIBUNG}}

### 1.3 Umfang und Kontext

| Aspekt | Beschreibung |
|--------|--------------|
| **Umfang** | {{VERARBEITUNG_UMFANG}} |
| **Kontext** | {{VERARBEITUNG_KONTEXT}} |
| **Eingesetzte Verarbeitungsmittel** | {{VERARBEITUNGSMITTEL}} |

### 1.4 Zweck der Verarbeitung

{{ZWECK_VERARBEITUNG}}

### 1.5 Rechtsgrundlage

**Rechtsgrundlage:** {{RECHTSGRUNDLAGE}}

{{#IF RECHTSGRUNDLAGE_DETAILS}}
**Erlaeuterung:** {{RECHTSGRUNDLAGE_DETAILS}}
{{/IF}}

### 1.6 Verarbeitete Datenkategorien

{{DATENKATEGORIEN}}

### 1.7 Betroffene Personengruppen

{{BETROFFENENGRUPPEN}}

### 1.8 Empfaenger und Auftragsverarbeiter

{{EMPFAENGER}}

{{#IF DRITTLANDTRANSFER}}
### 1.9 Uebermittlung in Drittlaender

{{DRITTLANDTRANSFER}}
{{/IF}}

### 1.10 Speicherdauer und Loeschfristen

{{SPEICHERDAUER}}

{{#IF GEMEINSAME_VERANTWORTUNG_DETAILS}}
### 1.11 Gemeinsame Verantwortlichkeit (Art. 26 DS-GVO)

{{GEMEINSAME_VERANTWORTUNG_DETAILS}}
{{/IF}}

{{#IF AUFTRAGSVERARBEITER_DETAILS}}
### 1.12 Auftragsverarbeitung (Art. 28 DS-GVO)

{{AUFTRAGSVERARBEITER_DETAILS}}
{{/IF}}

---

## 2. Notwendigkeit und Verhaeltnismaessigkeit

### 2.1 Notwendigkeit der Verarbeitung

{{NOTWENDIGKEIT_BEWERTUNG}}

### 2.2 Verhaeltnismaessigkeit

{{VERHAELTNISMAESSIGKEIT_BEWERTUNG}}

### 2.3 Pruefung der Grundsaetze (Art. 5 DS-GVO)

| Grundsatz | Einhaltung | Nachweis |
|-----------|------------|----------|
| **Zweckbindung** (Art. 5 Abs. 1 lit. b) | Die Verarbeitung erfolgt ausschliesslich fuer die angegebenen Zwecke. | Siehe Abschnitt 1.4 |
| **Datenminimierung** (Art. 5 Abs. 1 lit. c) | {{DATENMINIMIERUNG_NACHWEIS}} | |
| **Richtigkeit** (Art. 5 Abs. 1 lit. d) | Verfahren zur Sicherstellung der Datenqualitaet sind implementiert. | |
| **Speicherbegrenzung** (Art. 5 Abs. 1 lit. e) | {{SPEICHERBEGRENZUNG_NACHWEIS}} | |
| **Integritaet und Vertraulichkeit** (Art. 5 Abs. 1 lit. f) | Technische und organisatorische Massnahmen gemaess Abschnitt 5 umgesetzt. | Siehe Abschnitt 5 |

### 2.4 Pruefung alternativer Verarbeitungsmoeglichkeiten

{{ALTERNATIVEN_GEPRUEFT}}

---

## 3. Risikobewertung

### 3.1 Methodik

{{RISIKO_METHODIK}}

Die Risikobewertung erfolgt anhand zweier Dimensionen:
- **Schwere des Schadens** fuer die Betroffenen (gering / ueberschaubar / substanziell / gross)
- **Eintrittswahrscheinlichkeit** (gering / mittel / hoch / sehr hoch)

| | Schwere: Gering | Schwere: Ueberschaubar | Schwere: Substanziell | Schwere: Gross |
|---|---|---|---|---|
| **Wahrscheinlichkeit: Sehr hoch** | Mittel | Hoch | Sehr hoch | Sehr hoch |
| **Wahrscheinlichkeit: Hoch** | Niedrig | Mittel | Hoch | Sehr hoch |
| **Wahrscheinlichkeit: Mittel** | Niedrig | Niedrig | Mittel | Hoch |
| **Wahrscheinlichkeit: Gering** | Niedrig | Niedrig | Niedrig | Mittel |

### 3.2 Identifizierte Risiken

{{RISIKEN_TABELLE}}

### 3.3 Gesamtrisikobewertung

{{GESAMT_RISIKO_NIVEAU}}

---

## 4. Konsultation der Betroffenen und Interessentraeger

### 4.1 Konsultation der Betroffenen (Art. 35 Abs. 9 DS-GVO)

{{#IF KONSULTATION_BETROFFENE}}
{{KONSULTATION_BETROFFENE}}
{{/IF}}
{{#IF_NOT KONSULTATION_BETROFFENE}}
Eine Konsultation der Betroffenen wurde nicht durchgefuehrt. Begruendung: [Bitte ergaenzen — z. B. Unverhaeltnismaessigkeit, Geheimhaltungsinteressen, fehlende Praktikabilitaet].
{{/IF_NOT}}

{{#IF KONSULTATION_BETRIEBSRAT}}
### 4.2 Beteiligung der Arbeitnehmervertretung

{{KONSULTATION_BETRIEBSRAT}}
{{/IF}}

---

## 5. Technische und organisatorische Massnahmen (TOM)

Die Massnahmen sind nach den sieben Gewaehrleistungszielen des Standard-Datenschutzmodells (SDM V3.1a) strukturiert.

### 5.1 Verfuegbarkeit

Ziel: Personenbezogene Daten stehen zeitgerecht zur Verfuegung und koennen ordnungsgemaess verarbeitet werden.

{{TOM_VERFUEGBARKEIT}}

### 5.2 Integritaet

Ziel: Personenbezogene Daten bleiben waehrend der Verarbeitung unversehrt, vollstaendig und aktuell.

{{TOM_INTEGRITAET}}

### 5.3 Vertraulichkeit

Ziel: Nur befugte Personen koennen personenbezogene Daten zur Kenntnis nehmen.

{{TOM_VERTRAULICHKEIT}}

### 5.4 Nichtverkettung

Ziel: Personenbezogene Daten werden nur fuer den Zweck verarbeitet, zu dem sie erhoben wurden.

{{TOM_NICHTVERKETTUNG}}

### 5.5 Transparenz

Ziel: Betroffene, der Verantwortliche und die Aufsichtsbehoerde koennen die Verarbeitung nachvollziehen.

{{TOM_TRANSPARENZ}}

### 5.6 Intervenierbarkeit

Ziel: Betroffenenrechte (Auskunft, Berichtigung, Loeschung, Widerspruch) koennen wirksam ausgeuebt werden.

{{TOM_INTERVENIERBARKEIT}}

### 5.7 Datenminimierung

Ziel: Die Verarbeitung beschraenkt sich auf das erforderliche Mass.

{{TOM_DATENMINIMIERUNG}}

---

## 6. Stellungnahme des Datenschutzbeauftragten

### 6.1 Konsultation des DSB

{{DSB_STELLUNGNAHME}}

{{#IF DSB_DATUM}}
**Datum der Stellungnahme:** {{DSB_DATUM}}
{{/IF}}

### 6.2 Pruefung der Konsultationspflicht (Art. 36 DS-GVO)

Sofern das Restrisiko nach Umsetzung aller Massnahmen **hoch** bleibt, ist vor Beginn der Verarbeitung die zustaendige Aufsichtsbehoerde zu konsultieren (Art. 36 Abs. 1 DS-GVO).

{{#IF ART36_BEGRUENDUNG}}
{{ART36_BEGRUENDUNG}}
{{/IF}}
{{#IF_NOT ART36_BEGRUENDUNG}}
Nach Umsetzung der beschriebenen Massnahmen wird das Restrisiko als akzeptabel eingestuft. Eine Konsultation der Aufsichtsbehoerde ist nicht erforderlich.
{{/IF_NOT}}

---

## 7. Ergebnis und Ueberpruefungsplan

### 7.1 Ergebnis der DSFA

{{DSFA_ERGEBNIS}}

### 7.2 Restrisikobewertung

{{RESTRISIKO_BEWERTUNG}}

### 7.3 Ueberpruefungsplan

| Aspekt | Festlegung |
|--------|------------|
| **Regelmaessiges Ueberpruefungsintervall** | {{UEBERPRUFUNGSINTERVALL}} |
| **Naechste geplante Ueberpruefung** | {{NAECHSTE_UEBERPRUFUNG}} |

### 7.4 Trigger fuer ausserplanmaessige Ueberpruefung

{{AENDERUNGSTRIGGER}}

---

{{#IF KI_SYSTEME_DETAILS}}
## 8. KI-spezifisches Modul (EU AI Act)

Dieses Kapitel ist relevant, da KI-Systeme in der beschriebenen Verarbeitung eingesetzt werden.

### 8.1 Eingesetzte KI-Systeme

{{KI_SYSTEME_DETAILS}}

### 8.2 Grundrechtliche Folgenabschaetzung (Art. 27 KI-VO)

{{KI_GRUNDRECHTSPRUEFUNG}}

{{/IF}}

---

## Unterschriften

| Rolle | Name | Datum | Unterschrift |
|-------|------|-------|--------------|
| Erstellt von | {{ERSTELLT_VON}} | {{ERSTELLT_AM}} | _________________ |
{{#IF GENEHMIGT_VON}}| Datenschutzbeauftragter | {{GENEHMIGT_VON}} | {{GENEHMIGT_AM}} | _________________ |
{{/IF}}
| Verantwortlicher | | | _________________ |

---

*Erstellt mit BreakPilot Compliance. Dieses Dokument ist vertraulich und nur fuer den internen Gebrauch bestimmt.*
$template$
) ON CONFLICT DO NOTHING;
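The template body relies on `{{NAME}}` substitution plus `{{#IF X}}…{{/IF}}` and `{{#IF_NOT X}}…{{/IF_NOT}}` blocks. The renderer itself is not part of this diff, so the sketch below only illustrates the semantics these templates appear to assume (the `render` function is an assumption, not the production implementation):

```python
import re

def render(template: str, values: dict) -> str:
    """Sketch of the assumed substitution semantics: {{#IF X}}...{{/IF}}
    keeps its body when X has a truthy value, {{#IF_NOT X}}...{{/IF_NOT}}
    keeps its body when X is missing or empty, and {{NAME}} becomes its
    value. Nested conditionals are not handled (the templates do not
    nest them)."""
    out = re.sub(r"\{\{#IF (\w+)\}\}(.*?)\{\{/IF\}\}",
                 lambda m: m.group(2) if values.get(m.group(1)) else "",
                 template, flags=re.S)
    out = re.sub(r"\{\{#IF_NOT (\w+)\}\}(.*?)\{\{/IF_NOT\}\}",
                 lambda m: "" if values.get(m.group(1)) else m.group(2),
                 out, flags=re.S)
    return re.sub(r"\{\{(\w+)\}\}",
                  lambda m: str(values.get(m.group(1), "")), out)

print(render("{{#IF DSB_DATUM}}**Datum der Stellungnahme:** {{DSB_DATUM}}{{/IF}}",
             {"DSB_DATUM": "2025-01-31"}))
# -> **Datum der Stellungnahme:** 2025-01-31
```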
247
document-templates/migrations/002_tom_sdm_template.sql
Normal file
@@ -0,0 +1,247 @@
-- Migration 002: TOM Template V2 — nach SDM-Gewaehrleistungszielen
-- Archiviert V1 und fuegt SDM-strukturierte TOM-Dokumentation ein.

-- 1. Bestehende V1 archivieren
UPDATE compliance.compliance_legal_templates
SET status = 'archived', updated_at = NOW()
WHERE document_type = 'tom_documentation'
  AND status = 'published';

-- 2. TOM V2 einfuegen
INSERT INTO compliance.compliance_legal_templates (
    tenant_id, document_type, title, description, language, jurisdiction,
    version, status, license_name, source_name, attribution_required,
    is_complete_document, placeholders, content
) VALUES (
    '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
    'tom_documentation',
    'Technische und Organisatorische Massnahmen (TOM) nach SDM V3.1a',
    'TOM-Dokumentation strukturiert nach den sieben Gewaehrleistungszielen des Standard-Datenschutzmodells (SDM V3.1a). Mit sektorspezifischen Ergaenzungen und Compliance-Bewertung.',
    'de',
    'EU/DSGVO',
    '2.0',
    'published',
    'MIT',
    'BreakPilot Compliance',
    false,
    true,
    CAST('[
        "{{ORGANISATION_NAME}}",
        "{{ORGANISATION_ADRESSE}}",
        "{{DSB_NAME}}",
        "{{DSB_KONTAKT}}",
        "{{ERSTELLT_VON}}",
        "{{ERSTELLT_AM}}",
        "{{VERSION}}",
        "{{GELTUNGSBEREICH}}",
        "{{SCHUTZBEDARF_VERTRAULICHKEIT}}",
        "{{SCHUTZBEDARF_INTEGRITAET}}",
        "{{SCHUTZBEDARF_VERFUEGBARKEIT}}",
        "{{GESAMTSCHUTZNIVEAU}}",
        "{{TOM_VERFUEGBARKEIT}}",
        "{{TOM_INTEGRITAET}}",
        "{{TOM_VERTRAULICHKEIT}}",
        "{{TOM_NICHTVERKETTUNG}}",
        "{{TOM_TRANSPARENZ}}",
        "{{TOM_INTERVENIERBARKEIT}}",
        "{{TOM_DATENMINIMIERUNG}}",
        "{{TOM_SEKTOR_ERGAENZUNGEN}}",
        "{{COMPLIANCE_BEWERTUNG}}",
        "{{NAECHSTE_UEBERPRUFUNG}}",
        "{{UEBERPRUFUNGSINTERVALL}}"
    ]' AS jsonb),
    $template$# Technische und Organisatorische Massnahmen (TOM)
**gemaess Art. 32 DS-GVO — strukturiert nach SDM V3.1a**

---

## 1. Allgemeine Informationen

| Feld | Inhalt |
|------|--------|
| **Organisation** | {{ORGANISATION_NAME}} |
| **Adresse** | {{ORGANISATION_ADRESSE}} |
| **Datenschutzbeauftragter** | {{DSB_NAME}} ({{DSB_KONTAKT}}) |
| **Erstellt von** | {{ERSTELLT_VON}} |
| **Erstellt am** | {{ERSTELLT_AM}} |
| **Version** | {{VERSION}} |

### 1.1 Geltungsbereich

{{GELTUNGSBEREICH}}

---

## 2. Schutzbedarfsanalyse

Die Schutzbedarfsanalyse bildet die Grundlage fuer die Auswahl angemessener Massnahmen. Der Schutzbedarf wird fuer die drei klassischen Schutzziele bewertet.

| Schutzziel | Schutzbedarf | Begruendung |
|------------|-------------|-------------|
| **Vertraulichkeit** | {{SCHUTZBEDARF_VERTRAULICHKEIT}} | |
| **Integritaet** | {{SCHUTZBEDARF_INTEGRITAET}} | |
| **Verfuegbarkeit** | {{SCHUTZBEDARF_VERFUEGBARKEIT}} | |

**Gesamtschutzniveau:** {{GESAMTSCHUTZNIVEAU}}

*Bewertungsskala: normal / hoch / sehr hoch*

---

## 3. Massnahmen nach SDM-Gewaehrleistungszielen

Die folgende Struktur folgt den sieben Gewaehrleistungszielen des Standard-Datenschutzmodells (SDM V3.1a) der Datenschutzkonferenz.

### 3.1 Verfuegbarkeit

**Ziel:** Personenbezogene Daten stehen zeitgerecht zur Verfuegung und koennen ordnungsgemaess verarbeitet werden.

**Referenz:** SDM-Baustein 11 (Aufbewahren)

{{TOM_VERFUEGBARKEIT}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Redundante Datenhaltung (RAID, Replikation) | technisch | | IT-Betrieb | 12 Monate |
| Regelmaessige Backups (taeglich inkrementell, woechentlich voll) | technisch | | IT-Betrieb | 6 Monate |
| Disaster-Recovery-Plan mit dokumentierten RTO/RPO | organisatorisch | | IT-Sicherheit | 12 Monate |
| USV und Notstromversorgung | technisch | | Facility Mgmt | 12 Monate |
| Wiederherstellungstests (mind. jaehrlich) | organisatorisch | | IT-Betrieb | 12 Monate |

### 3.2 Integritaet

**Ziel:** Personenbezogene Daten bleiben waehrend der Verarbeitung unversehrt, vollstaendig und aktuell.

**Referenz:** SDM-Baustein 61 (Berichtigen)

{{TOM_INTEGRITAET}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Pruefsummen und digitale Signaturen | technisch | | IT-Entwicklung | 12 Monate |
| Eingabevalidierung und Plausibilitaetspruefungen | technisch | | IT-Entwicklung | bei Release |
| Change-Management-Verfahren | organisatorisch | | IT-Betrieb | 12 Monate |
| Versionierung von Datensaetzen | technisch | | IT-Entwicklung | 12 Monate |

### 3.3 Vertraulichkeit

**Ziel:** Nur befugte Personen koennen personenbezogene Daten zur Kenntnis nehmen.

**Referenz:** SDM-Baustein 51 (Zugriffe regeln)

{{TOM_VERTRAULICHKEIT}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Verschluesselung im Transit (TLS 1.3) | technisch | | IT-Sicherheit | 12 Monate |
| Verschluesselung at Rest (AES-256) | technisch | | IT-Sicherheit | 12 Monate |
| Rollenbasiertes Zugriffskonzept (RBAC, Least Privilege) | technisch | | IT-Sicherheit | 6 Monate |
| Multi-Faktor-Authentifizierung (MFA) | technisch | | IT-Sicherheit | 12 Monate |
| Physische Zutrittskontrolle (Schluessel, Kartenleser) | technisch | | Facility Mgmt | 12 Monate |
| Vertraulichkeitsverpflichtung Mitarbeitende | organisatorisch | | HR / DSB | bei Eintritt |
| Passwortrichtlinie (Komplexitaet, Ablauf, Historie) | organisatorisch | | IT-Sicherheit | 12 Monate |

### 3.4 Nichtverkettung

**Ziel:** Personenbezogene Daten werden nur fuer den Zweck verarbeitet, zu dem sie erhoben wurden.

**Referenz:** SDM-Baustein 50 (Trennen)

{{TOM_NICHTVERKETTUNG}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Mandantentrennung (logisch oder physisch) | technisch | | IT-Architektur | 12 Monate |
| Pseudonymisierung wo fachlich moeglich | technisch | | IT-Entwicklung | 12 Monate |
| Zweckbindungspruefung bei neuen Datennutzungen | organisatorisch | | DSB | bei Bedarf |
| Getrennte Datenbanken je Verarbeitungszweck | technisch | | IT-Architektur | 12 Monate |

### 3.5 Transparenz

**Ziel:** Betroffene, der Verantwortliche und die Aufsichtsbehoerde koennen die Verarbeitung nachvollziehen.

**Referenz:** SDM-Baustein 42 (Dokumentieren), SDM-Baustein 43 (Protokollieren)

{{TOM_TRANSPARENZ}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Verzeichnis der Verarbeitungstaetigkeiten (Art. 30) | organisatorisch | | DSB | 12 Monate |
| Vollstaendiges Audit-Log aller Datenzugriffe | technisch | | IT-Betrieb | 6 Monate |
| Datenschutzerklaerung (Art. 13/14 DS-GVO) | organisatorisch | | DSB / Recht | bei Aenderung |
| Dokumentierte Prozesse fuer Datenpannen-Meldung | organisatorisch | | DSB | 12 Monate |

### 3.6 Intervenierbarkeit

**Ziel:** Betroffenenrechte (Auskunft, Berichtigung, Loeschung, Widerspruch) koennen wirksam ausgeuebt werden.

**Referenz:** SDM-Baustein 60 (Loeschen), SDM-Baustein 61 (Berichtigen), SDM-Baustein 62 (Einschraenken)

{{TOM_INTERVENIERBARKEIT}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Prozess fuer Betroffenenanfragen (Auskunft, Loeschung, Berichtigung) | organisatorisch | | DSB | 12 Monate |
| Technische Loeschfaehigkeit mit Nachweis | technisch | | IT-Entwicklung | 12 Monate |
| Datenexport in maschinenlesbarem Format (Art. 20) | technisch | | IT-Entwicklung | 12 Monate |
| Sperrfunktion (Einschraenkung der Verarbeitung) | technisch | | IT-Entwicklung | 12 Monate |
| Widerspruchsmoeglichkeit gegen Verarbeitung | organisatorisch | | DSB | 12 Monate |

### 3.7 Datenminimierung

**Ziel:** Die Verarbeitung beschraenkt sich auf das erforderliche Mass.

**Referenz:** SDM-Baustein 41 (Planen und Spezifizieren)

{{TOM_DATENMINIMIERUNG}}

| Massnahme | Typ | Status | Verantwortlich | Pruefintervall |
|-----------|-----|--------|----------------|----------------|
| Regelmaessige Pruefung der Erforderlichkeit | organisatorisch | | DSB | 12 Monate |
| Automatisierte Loeschung nach Fristablauf | technisch | | IT-Entwicklung | 6 Monate |
| Anonymisierung fuer statistische Zwecke | technisch | | IT-Entwicklung | bei Bedarf |
| Privacy by Design bei neuen Verarbeitungen | organisatorisch | | IT-Architektur / DSB | bei Bedarf |
| Loeschfristenkatalog (dokumentiert) | organisatorisch | | DSB / Recht | 12 Monate |

---

## 4. Sektorspezifische Ergaenzungen

{{#IF TOM_SEKTOR_ERGAENZUNGEN}}
{{TOM_SEKTOR_ERGAENZUNGEN}}
{{/IF}}
{{#IF_NOT TOM_SEKTOR_ERGAENZUNGEN}}
Keine sektorspezifischen Ergaenzungen erforderlich.
{{/IF_NOT}}

---

## 5. Compliance-Bewertung

{{#IF COMPLIANCE_BEWERTUNG}}
{{COMPLIANCE_BEWERTUNG}}
{{/IF}}
{{#IF_NOT COMPLIANCE_BEWERTUNG}}
Die Compliance-Bewertung erfolgt nach erstmaliger Implementierung aller Massnahmen.
{{/IF_NOT}}

---

## 6. Ueberpruefungsplan

| Aspekt | Festlegung |
|--------|------------|
| **Regelmaessige Ueberpruefung** | {{UEBERPRUFUNGSINTERVALL}} |
| **Naechste geplante Ueberpruefung** | {{NAECHSTE_UEBERPRUFUNG}} |

**Trigger fuer ausserplanmaessige Ueberpruefung:**
- Sicherheitsvorfall oder Datenpanne
- Wesentliche Aenderung der Verarbeitungssysteme
- Neue regulatorische Anforderungen (z. B. NIS2, AI Act)
- Ergebnisse interner oder externer Audits

---

*Erstellt mit BreakPilot Compliance. Struktur basiert auf dem Standard-Datenschutzmodell (SDM V3.1a) der Datenschutzkonferenz.*
$template$
) ON CONFLICT DO NOTHING;
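All three migrations follow the same archive-then-insert pattern: the published V1 row is flipped to 'archived' before the V2 row is inserted with `ON CONFLICT DO NOTHING`, so a re-run neither duplicates nor un-archives anything. A post-migration sanity check might look like the sketch below (the DSN is a placeholder; the expected counts follow the migration comments: one published row each for 'dsfa' and 'tom_documentation', and one general plus six sector templates for 'vvt_register'):

```python
import psycopg2

# Sketch of a post-migration check; connection string is illustrative.
conn = psycopg2.connect("dbname=breakpilot_compliance")
with conn, conn.cursor() as cur:
    cur.execute(
        """
        SELECT document_type, count(*)
        FROM compliance.compliance_legal_templates
        WHERE status = 'published'
          AND document_type IN ('dsfa', 'tom_documentation', 'vvt_register')
        GROUP BY document_type
        """
    )
    for document_type, published in cur.fetchall():
        print(f"{document_type}: {published} published template(s)")
```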
663
document-templates/migrations/003_vvt_sector_templates.sql
Normal file
@@ -0,0 +1,663 @@
-- Migration 003: VVT Sector Templates — Branchenspezifische Verarbeitungsverzeichnisse
-- 6 Branchen-Muster + 1 allgemeine V2-Vorlage

-- 1. Bestehende V1 archivieren
UPDATE compliance.compliance_legal_templates
SET status = 'archived', updated_at = NOW()
WHERE document_type = 'vvt_register'
  AND status = 'published';

-- 2. Allgemeine VVT V2 Vorlage (branchenuebergreifend)
INSERT INTO compliance.compliance_legal_templates (
    tenant_id, document_type, title, description, language, jurisdiction,
    version, status, license_name, source_name, attribution_required,
    is_complete_document, placeholders, content
) VALUES (
    '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
    'vvt_register',
    'Verzeichnis von Verarbeitungstaetigkeiten (VVT) gemaess Art. 30 DS-GVO — V2',
    'Erweiterte VVT-Vorlage mit vollstaendiger Art. 30 Struktur, Loeschfristen-Integration und DSFA-Verweis. Branchenuebergreifend einsetzbar.',
    'de',
    'EU/DSGVO',
    '2.0',
    'published',
    'MIT',
    'BreakPilot Compliance',
    false,
    true,
    CAST('[
        "{{ORGANISATION_NAME}}",
        "{{ORGANISATION_ADRESSE}}",
        "{{VERTRETER_NAME}}",
        "{{DSB_NAME}}",
        "{{DSB_KONTAKT}}",
        "{{ERSTELLT_AM}}",
        "{{VERSION}}",
        "{{VVT_NR}}",
        "{{VERARBEITUNG_NAME}}",
        "{{VERARBEITUNG_BESCHREIBUNG}}",
        "{{ZWECKE}}",
        "{{RECHTSGRUNDLAGEN}}",
        "{{BETROFFENE}}",
        "{{DATENKATEGORIEN}}",
        "{{EMPFAENGER}}",
        "{{DRITTLAND}}",
        "{{DRITTLAND_GARANTIEN}}",
        "{{LOESCHFRISTEN}}",
        "{{TOM_REFERENZ}}",
        "{{SYSTEME}}",
        "{{VERANTWORTLICHER}}",
        "{{RISIKOBEWERTUNG}}",
        "{{DSFA_ERFORDERLICH}}",
        "{{LETZTE_PRUEFUNG}}",
        "{{NAECHSTE_PRUEFUNG}}",
        "{{STATUS}}"
    ]' AS jsonb),
    $template$# Verzeichnis von Verarbeitungstaetigkeiten (VVT)
**gemaess Art. 30 DS-GVO**

---

## Angaben zum Verantwortlichen

| Feld | Inhalt |
|------|--------|
| **Name / Firma** | {{ORGANISATION_NAME}} |
| **Adresse** | {{ORGANISATION_ADRESSE}} |
| **Vertreter des Verantwortlichen** | {{VERTRETER_NAME}} |
| **Datenschutzbeauftragter** | {{DSB_NAME}} ({{DSB_KONTAKT}}) |
| **Stand** | {{ERSTELLT_AM}} |
| **Version** | {{VERSION}} |

---

## Verarbeitungstaetigkeit

### Stammdaten

| Pflichtfeld (Art. 30) | Inhalt |
|------------------------|--------|
| **VVT-Nr.** | {{VVT_NR}} |
| **Bezeichnung** | {{VERARBEITUNG_NAME}} |
| **Beschreibung** | {{VERARBEITUNG_BESCHREIBUNG}} |

### Zweck und Rechtsgrundlage

| Pflichtfeld | Inhalt |
|-------------|--------|
| **Zweck(e) der Verarbeitung** | {{ZWECKE}} |
| **Rechtsgrundlage(n)** | {{RECHTSGRUNDLAGEN}} |

### Betroffene und Daten

| Pflichtfeld | Inhalt |
|-------------|--------|
| **Kategorien betroffener Personen** | {{BETROFFENE}} |
| **Kategorien personenbezogener Daten** | {{DATENKATEGORIEN}} |

### Empfaenger und Uebermittlung

| Pflichtfeld | Inhalt |
|-------------|--------|
| **Kategorien von Empfaengern** | {{EMPFAENGER}} |

{{#IF DRITTLAND}}
| **Uebermittlung in Drittlaender** | {{DRITTLAND}} |
| **Geeignete Garantien (Art. 46)** | {{DRITTLAND_GARANTIEN}} |
{{/IF}}

### Fristen und Schutzmassnahmen

| Pflichtfeld | Inhalt |
|-------------|--------|
| **Loeschfristen** | {{LOESCHFRISTEN}} |
| **TOM-Beschreibung (Art. 32)** | {{TOM_REFERENZ}} |

### Zusaetzliche Angaben (empfohlen)

| Feld | Inhalt |
|------|--------|
| **Eingesetzte Systeme** | {{SYSTEME}} |
| **Verantwortliche Abteilung** | {{VERANTWORTLICHER}} |
| **Risikobewertung** | {{RISIKOBEWERTUNG}} |
| **DSFA erforderlich?** | {{DSFA_ERFORDERLICH}} |
| **Letzte Pruefung** | {{LETZTE_PRUEFUNG}} |
| **Naechste Pruefung** | {{NAECHSTE_PRUEFUNG}} |
| **Status** | {{STATUS}} |

---

*Erstellt mit BreakPilot Compliance. Struktur entspricht Art. 30 Abs. 1 DS-GVO.*
$template$
) ON CONFLICT DO NOTHING;

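The general V2 template above declares its fillable fields twice: as a JSONB `placeholders` array and as `{{...}}` tokens inside `content`, while the prefilled sector templates that follow pass `'[]'::jsonb`. A consistency check between the two is easy to sketch (helper name and sample values are illustrative, not part of the migrations):

```python
import re

def placeholder_diff(declared: list[str], content: str) -> tuple[set, set]:
    """Sketch: compare a template row's JSONB placeholder list with the
    {{NAME}} tokens actually used in its content column. Control tokens
    such as {{#IF X}} and {{/IF}} are not captured by the pattern."""
    declared_names = {p.strip("{}") for p in declared}
    used_names = set(re.findall(r"\{\{(\w+)\}\}", content))
    return declared_names - used_names, used_names - declared_names

unused, undeclared = placeholder_diff(
    ["{{VVT_NR}}", "{{STATUS}}"],
    "| **VVT-Nr.** | {{VVT_NR}} |\n| **Status** | {{STATUS}} |",
)
assert not unused and not undeclared
```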
-- 3. VVT Branchenvorlage: IT / SaaS
|
||||
INSERT INTO compliance.compliance_legal_templates (
|
||||
tenant_id, document_type, title, description, language, jurisdiction,
|
||||
version, status, license_name, source_name, attribution_required,
|
||||
is_complete_document, placeholders, content
|
||||
) VALUES (
|
||||
'9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
|
||||
'vvt_register',
|
||||
'VVT Branchenvorlage: IT / SaaS-Unternehmen',
|
||||
'Vorbefuelltes Verarbeitungsverzeichnis mit typischen Verarbeitungstaetigkeiten eines IT- oder SaaS-Unternehmens. Enthalt 8 Standard-Verarbeitungen.',
|
||||
'de', 'EU/DSGVO', '2.0', 'published', 'MIT', 'BreakPilot Compliance', false, true,
|
||||
'[]'::jsonb,
|
||||
$template$# VVT Branchenvorlage: IT / SaaS-Unternehmen
|
||||
|
||||
Die folgenden Verarbeitungstaetigkeiten sind typisch fuer IT- und SaaS-Unternehmen. Bitte pruefen und an Ihre konkrete Situation anpassen.
|
||||
|
||||
---
|
||||
|
||||
## VVT-001: SaaS-Plattformbetrieb
|
||||
|
||||
| Feld | Inhalt |
|
||||
|------|--------|
|
||||
| **Zweck** | Bereitstellung und Betrieb der SaaS-Plattform fuer Kunden |
|
||||
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfullung) |
|
||||
| **Betroffene** | Kunden, Endnutzer der Plattform |
|
||||
| **Datenkategorien** | Stammdaten, Nutzungsdaten, Inhaltsdaten, technische Logdaten |
|
||||
| **Empfaenger** | Hosting-Anbieter (AVV), Support-Dienstleister (AVV) |
|
||||
| **Loeschfrist** | 90 Tage nach Vertragsende + gesetzliche Aufbewahrungsfristen |
|
||||
| **TOM** | Siehe TOM-Dokumentation: Mandantentrennung, Verschluesselung, RBAC |
|
||||
| **DSFA erforderlich?** | Abhaengig von Art und Umfang der verarbeiteten Daten |
|
||||
|
||||
## VVT-002: Kundenverwaltung / CRM
|
||||
|
||||
| Feld | Inhalt |
|
||||
|------|--------|
|
||||
| **Zweck** | Verwaltung von Kundenbeziehungen, Vertragsmanagement |
|
||||
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfullung) |
|
||||
| **Betroffene** | Kunden, Ansprechpartner, Interessenten |
|
||||
| **Datenkategorien** | Kontaktdaten, Vertragsdaten, Kommunikationshistorie |
|
||||
| **Empfaenger** | CRM-Anbieter (AVV), ggf. Vertriebspartner |
|
||||
| **Loeschfrist** | 3 Jahre nach letztem Kontakt (Verjaeherung), 10 Jahre Rechnungsdaten (HGB/AO) |
|
||||
| **TOM** | Zugriffsbeschraenkung auf Vertrieb/Support, Protokollierung |
|
||||
|
||||
## VVT-003: E-Mail-Marketing / Newsletter
|
||||
|
||||
| Feld | Inhalt |
|
||||
|------|--------|
|
||||
| **Zweck** | Versand von Produkt-Updates, Marketing-Newsletter |
|
||||
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. a DS-GVO (Einwilligung) + UWG §7 |
|
||||
| **Betroffene** | Newsletter-Abonnenten |
|
||||
| **Datenkategorien** | E-Mail-Adresse, Name, Oeffnungs-/Klickverhalten |
|
||||
| **Empfaenger** | E-Mail-Dienstleister (AVV) |
|
||||
| **Loeschfrist** | Unverzueglich nach Widerruf der Einwilligung |
|
||||
| **TOM** | Double-Opt-In, einfache Abmeldefunktion |
|
||||
|
||||
## VVT-004: Webanalyse
|
||||
|
||||
| Feld | Inhalt |
|
||||
|------|--------|
|
||||
| **Zweck** | Analyse der Website-Nutzung zur Verbesserung des Angebots |
|
||||
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. a DS-GVO (Einwilligung via Cookie-Banner) |
|
||||
| **Betroffene** | Website-Besucher |
|
||||
| **Datenkategorien** | IP-Adresse (anonymisiert), Seitenaufrufe, Verweildauer, Geraeteinformationen |
|
||||
| **Empfaenger** | Analyse-Anbieter (AVV) |
|
||||
| **Loeschfrist** | 14 Monate (max. Cookie-Laufzeit) |
|
||||
| **TOM** | IP-Anonymisierung, Cookie-Consent-Management (TDDDG §25) |
|
||||
|
||||
## VVT-005: Bewerbermanagement
|
||||
|
||||
| Feld | Inhalt |
|
||||
|------|--------|
|
||||
| **Zweck** | Bearbeitung von Bewerbungen, Auswahlverfahren |
|
||||
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO i.V.m. §26 BDSG (Beschaeftigungsverhaeltnis) |
|
||||
| **Betroffene** | Bewerberinnen und Bewerber |
|
||||
| **Datenkategorien** | Kontaktdaten, Lebenslauf, Qualifikationen, Bewerbungsunterlagen |
|
||||
| **Empfaenger** | Fachabteilung, ggf. Personaldienstleister (AVV) |
|
||||
| **Loeschfrist** | 6 Monate nach Abschluss des Verfahrens (AGG-Frist) |
|
||||
| **TOM** | Zugriffsschutz auf Bewerbungsportal, verschluesselte Uebertragung |
|
||||
|
||||
## VVT-006: Mitarbeiterverwaltung / HR
|
||||
|
||||
| Feld | Inhalt |
|
||||
|------|--------|
|
||||
| **Zweck** | Personalverwaltung, Lohn-/Gehaltsabrechnung, Arbeitszeiterfassung |
|
||||
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/c DS-GVO i.V.m. §26 BDSG |
|
||||
| **Betroffene** | Beschaeftigte |
|
||||
| **Datenkategorien** | Stammdaten, Vertragsdaten, Bankverbindung, Sozialversicherung, Arbeitszeitdaten |
|
||||
| **Empfaenger** | Lohnbuero (AVV), Finanzamt, Sozialversicherungstraeger |
|
||||
| **Loeschfrist** | 10 Jahre nach Austritt (steuerliche Aufbewahrung), Personalakte 3 Jahre |
|
||||
| **TOM** | Besonderer Zugriffsschutz (nur HR), verschluesselte Speicherung |
|
||||
|
||||
## VVT-007: Support-Ticketing

| Feld | Inhalt |
|------|--------|
| **Zweck** | Bearbeitung von Kundenanfragen und Stoerungsmeldungen |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfuellung) |
| **Betroffene** | Kunden, Endnutzer |
| **Datenkategorien** | Kontaktdaten, Ticket-Inhalt, Screenshots, Systemlogs |
| **Empfaenger** | Support-Tool-Anbieter (AVV), ggf. Entwicklungsteam |
| **Loeschfrist** | 2 Jahre nach Ticket-Schliessung |
| **TOM** | Rollenbasierter Zugriff, Pseudonymisierung in internen Reports |

## VVT-008: Logging und Monitoring

| Feld | Inhalt |
|------|--------|
| **Zweck** | Sicherheitsueberwachung, Fehleranalyse, Leistungsoptimierung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. f DS-GVO (berechtigtes Interesse: IT-Sicherheit) |
| **Betroffene** | Nutzer der Plattform, Administratoren |
| **Datenkategorien** | IP-Adressen, Zugriffszeitpunkte, Fehlerprotokolle, Performance-Metriken |
| **Empfaenger** | Log-Management-Anbieter (AVV) |
| **Loeschfrist** | 30 Tage Anwendungslogs, 90 Tage Sicherheitslogs |
| **TOM** | Zugriffsschutz auf Logdaten, automatische Rotation |

---

*Erstellt mit BreakPilot Compliance. Branchenvorlage IT / SaaS.*
$template$
) ON CONFLICT DO NOTHING;

-- 4. VVT Branchenvorlage: Gesundheitswesen
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'vvt_register',
  'VVT Branchenvorlage: Gesundheitswesen',
  'Vorbefuelltes Verarbeitungsverzeichnis mit typischen Verarbeitungen im Gesundheitswesen (Arztpraxis, MVZ, Klinik). Beruecksichtigt Art. 9 DS-GVO besondere Kategorien.',
  'de', 'EU/DSGVO', '2.0', 'published', 'MIT', 'BreakPilot Compliance', false, true,
  '[]'::jsonb,
$template$# VVT Branchenvorlage: Gesundheitswesen

Typische Verarbeitungstaetigkeiten fuer Arztpraxen, MVZ und Kliniken. **Besonderheit:** Verarbeitung besonderer Kategorien personenbezogener Daten (Art. 9 DS-GVO — Gesundheitsdaten).

---

## VVT-G01: Patientenverwaltung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Fuehrung der Patientenakte, Behandlungsdokumentation |
| **Rechtsgrundlage** | Art. 9 Abs. 2 lit. h DS-GVO i.V.m. §630f BGB (Dokumentationspflicht) |
| **Betroffene** | Patienten |
| **Datenkategorien** | Stammdaten, Versicherungsdaten, Diagnosen, Befunde, Behandlungsverlaeufe (Art. 9) |
| **Empfaenger** | Praxisverwaltungssystem-Anbieter (AVV), Labor (AVV), ueberweisende Aerzte |
| **Loeschfrist** | 10 Jahre nach letzter Behandlung (§630f Abs. 3 BGB), Strahlenpass 30 Jahre |
| **TOM** | Verschluesselung Patientenakte, Zugriffsschutz (nur behandelnde Aerzte), Notfallzugriff |
| **DSFA erforderlich?** | Ja (umfangreiche Verarbeitung Art. 9 Daten) |

## VVT-G02: Terminmanagement

| Feld | Inhalt |
|------|--------|
| **Zweck** | Organisation und Verwaltung von Patienten-Terminen |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Behandlungsvertrag) |
| **Betroffene** | Patienten |
| **Datenkategorien** | Name, Kontaktdaten, Terminwunsch, ggf. Behandlungsgrund |
| **Empfaenger** | Online-Terminbuchungs-Anbieter (AVV) |
| **Loeschfrist** | 6 Monate nach Termin (sofern nicht zur Patientenakte) |
| **TOM** | Verschluesselte Uebertragung, Zugriffsschutz Terminkalender |

## VVT-G03: Abrechnung (KV / PKV)

| Feld | Inhalt |
|------|--------|
| **Zweck** | Abrechnung aerztlicher Leistungen gegenueber Krankenkassen / Privatpatienten |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. c DS-GVO (gesetzliche Pflicht), Art. 9 Abs. 2 lit. h |
| **Betroffene** | Patienten |
| **Datenkategorien** | Stammdaten, Versicherungsdaten, Diagnosen (ICD), Leistungsziffern (EBM/GOAe) |
| **Empfaenger** | KV (Kassenaerztliche Vereinigung), PKV, Abrechnungsstelle (AVV) |
| **Loeschfrist** | 10 Jahre (steuerliche Aufbewahrung AO) |
| **TOM** | Verschluesselte Datenuebermittlung (KV-Connect/KIM), Zugriffskontrolle |

## VVT-G04: Laborbefunde

| Feld | Inhalt |
|------|--------|
| **Zweck** | Beauftragung und Empfang von Laboruntersuchungen |
| **Rechtsgrundlage** | Art. 9 Abs. 2 lit. h DS-GVO |
| **Betroffene** | Patienten |
| **Datenkategorien** | Proben-ID, Untersuchungsparameter, Befundergebnisse (Art. 9) |
| **Empfaenger** | Labordienstleister (AVV) |
| **Loeschfrist** | 10 Jahre (Dokumentationspflicht) |
| **TOM** | Pseudonymisierung der Proben, verschluesselte Uebertragung |

## VVT-G05: Mitarbeiterverwaltung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Personalverwaltung, Dienstplanung, Lohnabrechnung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/c DS-GVO i.V.m. §26 BDSG |
| **Betroffene** | Beschaeftigte (Aerzte, MFA, Verwaltung) |
| **Datenkategorien** | Stammdaten, Vertragsdaten, Bankverbindung, Dienstzeiten |
| **Empfaenger** | Lohnbuero (AVV), Finanzamt, Sozialversicherungstraeger |
| **Loeschfrist** | 10 Jahre nach Austritt |
| **TOM** | Zugriffsschutz (nur HR/Praxisleitung) |

---

*Erstellt mit BreakPilot Compliance. Branchenvorlage Gesundheitswesen.*
$template$
) ON CONFLICT DO NOTHING;
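
Once seeded, these rows can be read back with a plain query against the same table. A minimal sketch, using the table, columns and values exactly as they appear in the migrations above:

```sql
-- Fetch all published VVT industry templates for the seed tenant.
SELECT title, version, jurisdiction
FROM compliance.compliance_legal_templates
WHERE tenant_id = '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid
  AND document_type = 'vvt_register'
  AND status = 'published'
ORDER BY title;
```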

-- 5. VVT Branchenvorlage: Handel / E-Commerce
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'vvt_register',
  'VVT Branchenvorlage: Handel / E-Commerce',
  'Vorbefuelltes Verarbeitungsverzeichnis fuer Online-Shops und Einzelhaendler. Beruecksichtigt TDDDG, Fernabsatzrecht und Zahlungsdienste.',
  'de', 'EU/DSGVO', '2.0', 'published', 'MIT', 'BreakPilot Compliance', false, true,
  '[]'::jsonb,
$template$# VVT Branchenvorlage: Handel / E-Commerce

Typische Verarbeitungstaetigkeiten fuer Online-Shops und Einzelhandel.

---

## VVT-H01: Bestellabwicklung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Bestellannahme, Versand, Rechnungsstellung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfuellung) |
| **Betroffene** | Kunden (Besteller) |
| **Datenkategorien** | Kontaktdaten, Lieferadresse, Bestelldaten, Rechnungsdaten |
| **Empfaenger** | Versanddienstleister, Zahlungsanbieter (AVV), Warenwirtschaft |
| **Loeschfrist** | 10 Jahre Rechnungsdaten (AO/HGB), 3 Jahre Bestelldaten (Verjaehrung) |
| **TOM** | Verschluesselte Uebertragung, Zugriffsschutz Bestellsystem |

## VVT-H02: Kundenkonto

| Feld | Inhalt |
|------|--------|
| **Zweck** | Bereitstellung eines Kundenkontos (optional, nicht Pflicht) |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. a/b DS-GVO |
| **Betroffene** | Registrierte Kunden |
| **Datenkategorien** | Stammdaten, Passwort (gehasht), Bestellhistorie, Wunschliste |
| **Empfaenger** | Shop-Plattform-Anbieter (AVV) |
| **Loeschfrist** | Unverzueglich nach Kontoloesch-Anfrage, Rechnungsdaten 10 Jahre |
| **TOM** | MFA-Option, sichere Passwortspeicherung (bcrypt), Gastzugang-Alternative |

## VVT-H03: Zahlungsabwicklung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Abwicklung von Zahlungsvorgaengen |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO |
| **Betroffene** | Zahlende Kunden |
| **Datenkategorien** | Zahlungsart, Transaktionsdaten (keine Kartennummern bei Tokenisierung) |
| **Empfaenger** | Payment-Service-Provider (eigene Verantwortung oder AVV) |
| **Loeschfrist** | 10 Jahre (steuerliche Aufbewahrung) |
| **TOM** | PCI-DSS Compliance, Tokenisierung, keine direkte Kartenspeicherung |

## VVT-H04: Newsletter / E-Mail-Marketing

| Feld | Inhalt |
|------|--------|
| **Zweck** | Versand von Angeboten und Produktneuheiten |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. a DS-GVO (Einwilligung) + UWG §7 Abs. 3 (Bestandskunden) |
| **Betroffene** | Newsletter-Abonnenten |
| **Datenkategorien** | E-Mail-Adresse, Name, Kaufhistorie (Bestandskunden), Oeffnungsraten |
| **Empfaenger** | Newsletter-Dienstleister (AVV) |
| **Loeschfrist** | Sofort nach Abmeldung |
| **TOM** | Double-Opt-In, Abmeldelink in jeder E-Mail |

## VVT-H05: Webanalyse und Tracking

| Feld | Inhalt |
|------|--------|
| **Zweck** | Analyse des Nutzerverhaltens im Shop, Conversion-Optimierung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. a DS-GVO (Einwilligung, TDDDG §25) |
| **Betroffene** | Website-Besucher |
| **Datenkategorien** | Anonymisierte IP, Seitenaufrufe, Klickpfade, Warenkorbdaten |
| **Empfaenger** | Analyse-Anbieter (AVV) |
| **Loeschfrist** | 14 Monate |
| **TOM** | IP-Anonymisierung, Cookie-Consent-Management, Opt-Out |

## VVT-H06: Retouren und Widerruf

| Feld | Inhalt |
|------|--------|
| **Zweck** | Bearbeitung von Retouren und Widerrufen (Fernabsatzrecht) |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/c DS-GVO |
| **Betroffene** | Kunden (Verbraucher) |
| **Datenkategorien** | Bestelldaten, Retourengrund, Erstattungsdaten |
| **Empfaenger** | Logistikdienstleister, Zahlungsanbieter |
| **Loeschfrist** | 3 Jahre (Verjaehrung), Buchhaltung 10 Jahre |
| **TOM** | Nachvollziehbare Retourenprozesse, Zugriffsbeschraenkung |

---

*Erstellt mit BreakPilot Compliance. Branchenvorlage Handel / E-Commerce.*
$template$
) ON CONFLICT DO NOTHING;

-- 6. VVT Branchenvorlage: Handwerk
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'vvt_register',
  'VVT Branchenvorlage: Handwerksbetrieb',
  'Vorbefuelltes Verarbeitungsverzeichnis fuer Handwerksbetriebe (Bau, Kfz, Elektro, etc.).',
  'de', 'EU/DSGVO', '2.0', 'published', 'MIT', 'BreakPilot Compliance', false, true,
  '[]'::jsonb,
$template$# VVT Branchenvorlage: Handwerksbetrieb

Typische Verarbeitungstaetigkeiten fuer Handwerksbetriebe.

---

## VVT-HW01: Kundenauftraege und Angebotserstellung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Angebotserstellung, Auftragsabwicklung, Rechnungsstellung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfuellung) |
| **Betroffene** | Kunden (Privat und Gewerbe) |
| **Datenkategorien** | Kontaktdaten, Objektadresse, Auftragsbeschreibung, Rechnungsdaten |
| **Empfaenger** | Buchhaltung, Steuerberater, ggf. Subunternehmer |
| **Loeschfrist** | 10 Jahre Rechnungen (AO/HGB), 5 Jahre Gewaehrleistung (BGB) |
| **TOM** | Zugriffskontrolle Auftragssystem, verschluesselte Speicherung |

## VVT-HW02: Mitarbeiterverwaltung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Personalverwaltung, Lohnabrechnung, Arbeitszeiterfassung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/c DS-GVO i.V.m. §26 BDSG |
| **Betroffene** | Beschaeftigte, Auszubildende |
| **Datenkategorien** | Stammdaten, Vertragsdaten, Bankverbindung, Arbeitszeiten, Gesundheitszeugnisse |
| **Empfaenger** | Lohnbuero (AVV), Finanzamt, Berufsgenossenschaft |
| **Loeschfrist** | 10 Jahre nach Austritt |
| **TOM** | Verschlossene Personalakte, Zugriffsschutz |

## VVT-HW03: Baustellendokumentation

| Feld | Inhalt |
|------|--------|
| **Zweck** | Dokumentation von Baufortschritt, Maengelprotokoll |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/f DS-GVO (Vertrag + berechtigtes Interesse) |
| **Betroffene** | Kunden, Mitarbeitende auf der Baustelle |
| **Datenkategorien** | Fotos (ggf. mit Personen), Protokolle, Abnahmedokumente |
| **Empfaenger** | Auftraggeber, Architekten, Baugutachter |
| **Loeschfrist** | 5 Jahre nach Abnahme (Verjaehrung), Fotos nach Projektabschluss |
| **TOM** | Beschraenkter Zugriff auf Projektordner, keine oeffentliche Cloud ohne AVV |

## VVT-HW04: Materialwirtschaft

| Feld | Inhalt |
|------|--------|
| **Zweck** | Materialbeschaffung, Lagerverwaltung, Lieferantenmanagement |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO |
| **Betroffene** | Lieferanten (Ansprechpartner) |
| **Datenkategorien** | Firmendaten, Ansprechpartner, Bestellhistorie, Konditionen |
| **Empfaenger** | Grosshandel, Buchhaltung |
| **Loeschfrist** | 6 Jahre (Handelsbriefe HGB), 10 Jahre Rechnungen |
| **TOM** | Zugriffskontrolle ERP/Warenwirtschaft |

---

*Erstellt mit BreakPilot Compliance. Branchenvorlage Handwerksbetrieb.*
$template$
) ON CONFLICT DO NOTHING;

-- 7. VVT Branchenvorlage: Bildung
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'vvt_register',
  'VVT Branchenvorlage: Bildungseinrichtung',
  'Vorbefuelltes Verarbeitungsverzeichnis fuer Schulen, Hochschulen und Bildungstraeger. Beruecksichtigt Schueler-/Studentendaten als schutzbeduerftige Betroffene.',
  'de', 'EU/DSGVO', '2.0', 'published', 'MIT', 'BreakPilot Compliance', false, true,
  '[]'::jsonb,
$template$# VVT Branchenvorlage: Bildungseinrichtung

Typische Verarbeitungstaetigkeiten fuer Schulen, Hochschulen und Bildungstraeger.

---

## VVT-B01: Schueler-/Studierendenverwaltung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Verwaltung von Schueler-/Studierendendaten, Anmeldung, Klassenzuordnung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. c/e DS-GVO i.V.m. Landesschulgesetz |
| **Betroffene** | Schueler/Studierende (ggf. Minderjaehrige — besonders schutzbeduerftig), Erziehungsberechtigte |
| **Datenkategorien** | Stammdaten, Kontaktdaten Erziehungsberechtigte, Klassenzuordnung |
| **Empfaenger** | Schulverwaltungssoftware-Anbieter (AVV), Schulbehoerde |
| **Loeschfrist** | Gemaess Landesschulgesetz (i.d.R. 5 Jahre nach Abgang) |
| **TOM** | Besonderer Zugriffsschutz, Altersverifizierung, Einwilligung Erziehungsberechtigte |
| **DSFA erforderlich?** | Ja (schutzbeduerftige Betroffene, ggf. grosser Umfang) |

## VVT-B02: Notenverarbeitung und Zeugniserstellung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Leistungsbewertung, Zeugnis- und Notenverwaltung |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. c/e DS-GVO i.V.m. Schulgesetz |
| **Betroffene** | Schueler/Studierende |
| **Datenkategorien** | Noten, Leistungsbewertungen, Pruefungsergebnisse |
| **Empfaenger** | Lehrkraefte, Schulleitung, Pruefungsamt |
| **Loeschfrist** | Zeugniskopien: 50 Jahre (Nachweispflicht), Einzelnoten: 2 Jahre |
| **TOM** | Zugriffsbeschraenkung auf Fachlehrkraft, verschluesselte Speicherung |

## VVT-B03: Lernplattform / LMS

| Feld | Inhalt |
|------|--------|
| **Zweck** | Digitaler Unterricht, Aufgabenverteilung, Kommunikation |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. e DS-GVO (oeffentliches Interesse) / lit. a (Einwilligung bei Minderjaehrigen) |
| **Betroffene** | Schueler/Studierende, Lehrkraefte |
| **Datenkategorien** | Nutzungsdaten, eingereichte Aufgaben, Chat-Nachrichten |
| **Empfaenger** | LMS-Anbieter (AVV), Hosting-Provider (AVV) |
| **Loeschfrist** | Kursende + 1 Schuljahr |
| **TOM** | Datensparsamkeit, keine Lernanalytics ohne Einwilligung, Hosting in EU |

## VVT-B04: Elternkommunikation

| Feld | Inhalt |
|------|--------|
| **Zweck** | Information und Kommunikation mit Erziehungsberechtigten |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. e DS-GVO |
| **Betroffene** | Erziehungsberechtigte |
| **Datenkategorien** | Kontaktdaten, Nachrichteninhalt |
| **Empfaenger** | Kommunikationsplattform-Anbieter (AVV) |
| **Loeschfrist** | Ende des Schuljahres bzw. Abgang des Kindes |
| **TOM** | Verschluesselte Kommunikation, kein WhatsApp/Social Media |

---

*Erstellt mit BreakPilot Compliance. Branchenvorlage Bildungseinrichtung.*
$template$
) ON CONFLICT DO NOTHING;

-- 8. VVT Branchenvorlage: Beratung / Dienstleistung
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'vvt_register',
  'VVT Branchenvorlage: Beratung / Dienstleistung',
  'Vorbefuelltes Verarbeitungsverzeichnis fuer Beratungsunternehmen, Kanzleien und Dienstleister.',
  'de', 'EU/DSGVO', '2.0', 'published', 'MIT', 'BreakPilot Compliance', false, true,
  '[]'::jsonb,
$template$# VVT Branchenvorlage: Beratung / Dienstleistung

Typische Verarbeitungstaetigkeiten fuer Beratungsunternehmen, Kanzleien und professionelle Dienstleister.

---

## VVT-D01: Mandantenverwaltung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Verwaltung von Mandanten-/Kundenbeziehungen, Vertragsdokumentation |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO (Vertragserfuellung) |
| **Betroffene** | Mandanten, Ansprechpartner |
| **Datenkategorien** | Kontaktdaten, Vertragsdaten, Korrespondenz, Rechnungsdaten |
| **Empfaenger** | Kanzleisoftware-Anbieter (AVV), Steuerberater |
| **Loeschfrist** | 10 Jahre Rechnungen, 5 Jahre Handakten (Berufsrecht), 3 Jahre sonstige |
| **TOM** | Mandantengeheimnis, verschluesselte Speicherung, Need-to-know-Prinzip |

## VVT-D02: Projektmanagement

| Feld | Inhalt |
|------|--------|
| **Zweck** | Planung und Steuerung von Beratungsprojekten |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/f DS-GVO |
| **Betroffene** | Projektbeteiligte (Mandant + intern) |
| **Datenkategorien** | Projektdaten, Aufgaben, Zeiterfassung, Ergebnisdokumente |
| **Empfaenger** | Projektmanagement-Tool (AVV), Mandant |
| **Loeschfrist** | 2 Jahre nach Projektabschluss |
| **TOM** | Projektspezifische Zugriffsrechte, Mandantentrennung |

## VVT-D03: Zeiterfassung und Abrechnung

| Feld | Inhalt |
|------|--------|
| **Zweck** | Erfassung geleisteter Stunden, Abrechnung gegenueber Mandanten |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b DS-GVO |
| **Betroffene** | Berater/Mitarbeitende, Mandanten |
| **Datenkategorien** | Arbeitszeiten, Taetigkeitsbeschreibungen, Stundensaetze |
| **Empfaenger** | Abrechnungssystem (AVV), Buchhaltung |
| **Loeschfrist** | 10 Jahre (steuerliche Aufbewahrung) |
| **TOM** | Zugriffsbeschraenkung (nur eigene Zeiten + Projektleitung) |

## VVT-D04: Dokumentenmanagement

| Feld | Inhalt |
|------|--------|
| **Zweck** | Verwaltung und Archivierung von Mandantendokumenten |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. b/c DS-GVO |
| **Betroffene** | Mandanten, ggf. Dritte in Dokumenten |
| **Datenkategorien** | Vertraege, Gutachten, Korrespondenz, Berichte |
| **Empfaenger** | DMS-Anbieter (AVV), Cloud-Speicher (AVV) |
| **Loeschfrist** | Gemaess Berufsrecht und Mandatsvereinbarung |
| **TOM** | Dokumentenklassifizierung, Versionierung, Zugriffsprotokollierung |

## VVT-D05: CRM und Akquise

| Feld | Inhalt |
|------|--------|
| **Zweck** | Kontaktpflege, Akquise, Beziehungsmanagement |
| **Rechtsgrundlage** | Art. 6 Abs. 1 lit. f DS-GVO (berechtigtes Interesse: Geschaeftsanbahnung) |
| **Betroffene** | Interessenten, Geschaeftskontakte |
| **Datenkategorien** | Kontaktdaten, Firma, Branche, Gespraechsnotizen |
| **Empfaenger** | CRM-Anbieter (AVV) |
| **Loeschfrist** | 3 Jahre nach letztem Kontakt |
| **TOM** | Widerspruchsmoeglichkeit, Datenminimierung |

---

*Erstellt mit BreakPilot Compliance. Branchenvorlage Beratung / Dienstleistung.*
$template$
) ON CONFLICT DO NOTHING;
212
document-templates/migrations/004_avv_template.sql
Normal file
@@ -0,0 +1,212 @@
-- Migration 004: AVV Template — Auftragsverarbeitungsvertrag (Art. 28 DS-GVO)
-- Deutsche AVV-Vorlage mit allen Pflichtinhalten.

INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'dpa',
  'Auftragsverarbeitungsvertrag (AVV) gemaess Art. 28 DS-GVO',
  'Vollstaendiger Auftragsverarbeitungsvertrag mit allen Pflichtinhalten nach Art. 28 Abs. 3 DS-GVO. Inkl. TOM-Anlage und Drittlandtransfer-Klausel.',
  'de',
  'EU/DSGVO',
  '2.0',
  'published',
  'MIT',
  'BreakPilot Compliance',
  false,
  true,
  CAST('[
    "{{VERANTWORTLICHER_NAME}}",
    "{{VERANTWORTLICHER_ADRESSE}}",
    "{{VERANTWORTLICHER_VERTRETER}}",
    "{{AUFTRAGSVERARBEITER_NAME}}",
    "{{AUFTRAGSVERARBEITER_ADRESSE}}",
    "{{AUFTRAGSVERARBEITER_VERTRETER}}",
    "{{VERTRAGSGEGENSTAND}}",
    "{{VERTRAGSDAUER}}",
    "{{VERARBEITUNGSZWECK}}",
    "{{ART_DER_VERARBEITUNG}}",
    "{{DATENKATEGORIEN}}",
    "{{BETROFFENE}}",
    "{{UNTERAUFTRAGSVERARBEITER_LISTE}}",
    "{{TOM_ANLAGE}}",
    "{{DRITTLANDTRANSFER_DETAILS}}",
    "{{ORT_DATUM}}",
    "{{WEISUNGSBERECHTIGTER}}",
    "{{KONTAKT_DATENSCHUTZ_AV}}"
  ]' AS jsonb),
$template$# Auftragsverarbeitungsvertrag (AVV)
**gemaess Art. 28 Abs. 3 DS-GVO**

---

## Vertragsparteien

**Verantwortlicher (Auftraggeber):**
{{VERANTWORTLICHER_NAME}}
{{VERANTWORTLICHER_ADRESSE}}
Vertreten durch: {{VERANTWORTLICHER_VERTRETER}}

**Auftragsverarbeiter (Auftragnehmer):**
{{AUFTRAGSVERARBEITER_NAME}}
{{AUFTRAGSVERARBEITER_ADRESSE}}
Vertreten durch: {{AUFTRAGSVERARBEITER_VERTRETER}}

---

## §1 Gegenstand und Dauer

(1) Der Auftragsverarbeiter verarbeitet personenbezogene Daten im Auftrag des Verantwortlichen. Gegenstand der Auftragsverarbeitung ist:

{{VERTRAGSGEGENSTAND}}

(2) Die Dauer der Verarbeitung entspricht der Laufzeit des Hauptvertrags: {{VERTRAGSDAUER}}.

---

## §2 Art und Zweck der Verarbeitung

(1) **Zweck:** {{VERARBEITUNGSZWECK}}

(2) **Art der Verarbeitung:** {{ART_DER_VERARBEITUNG}}

---

## §3 Art der personenbezogenen Daten

{{DATENKATEGORIEN}}

---

## §4 Kategorien betroffener Personen

{{BETROFFENE}}

---

## §5 Pflichten des Verantwortlichen

(1) Der Verantwortliche ist fuer die Rechtmaessigkeit der Datenverarbeitung verantwortlich.

(2) Der Verantwortliche erteilt Weisungen zur Datenverarbeitung. Weisungsberechtigt ist: {{WEISUNGSBERECHTIGTER}}.

(3) Der Verantwortliche informiert den Auftragsverarbeiter unverzueglich, wenn er Fehler oder Unregelmaessigkeiten feststellt.

(4) Der Verantwortliche ist verpflichtet, alle im Rahmen des Vertragsverhaeltnisses erlangten Kenntnisse vertraulich zu behandeln.

---

## §6 Pflichten des Auftragsverarbeiters

(1) Der Auftragsverarbeiter verarbeitet die Daten ausschliesslich auf dokumentierte Weisung des Verantwortlichen (Art. 28 Abs. 3 lit. a DS-GVO), es sei denn, er ist durch Unionsrecht oder nationales Recht hierzu verpflichtet.

(2) Der Auftragsverarbeiter gewaehrleistet, dass sich die zur Verarbeitung befugten Personen zur Vertraulichkeit verpflichtet haben oder einer angemessenen gesetzlichen Verschwiegenheitspflicht unterliegen (Art. 28 Abs. 3 lit. b).

(3) Der Auftragsverarbeiter trifft alle erforderlichen technischen und organisatorischen Massnahmen gemaess Art. 32 DS-GVO (siehe Anlage 1: TOM).

(4) Der Auftragsverarbeiter beachtet die Bedingungen fuer die Inanspruchnahme von Unterauftragsverarbeitern (§7 dieses Vertrags).

(5) Der Auftragsverarbeiter unterstuetzt den Verantwortlichen bei der Erfuellung der Betroffenenrechte (Art. 15-22 DS-GVO) durch geeignete technische und organisatorische Massnahmen (Art. 28 Abs. 3 lit. e).

(6) Der Auftragsverarbeiter unterstuetzt den Verantwortlichen bei der Einhaltung der Pflichten aus Art. 32-36 DS-GVO (Sicherheit, Meldepflichten, DSFA, Konsultation).

(7) Der Auftragsverarbeiter loescht oder gibt nach Wahl des Verantwortlichen alle personenbezogenen Daten nach Beendigung der Auftragsverarbeitung zurueck und loescht vorhandene Kopien, es sei denn, eine Aufbewahrungspflicht besteht (Art. 28 Abs. 3 lit. g).

(8) Der Auftragsverarbeiter stellt dem Verantwortlichen alle erforderlichen Informationen zum Nachweis der Einhaltung der Pflichten zur Verfuegung und ermoeglicht Ueberpruefungen/Audits (Art. 28 Abs. 3 lit. h).

(9) Der Auftragsverarbeiter informiert den Verantwortlichen unverzueglich, wenn eine Weisung nach seiner Auffassung gegen datenschutzrechtliche Vorschriften verstoesst.

(10) Der Auftragsverarbeiter benennt einen Ansprechpartner fuer den Datenschutz: {{KONTAKT_DATENSCHUTZ_AV}}.

---

## §7 Unterauftragsverarbeitung

(1) Der Auftragsverarbeiter darf Unterauftragsverarbeiter nur mit vorheriger schriftlicher Genehmigung des Verantwortlichen einsetzen. Es wird eine allgemeine Genehmigung erteilt, wobei der Auftragsverarbeiter den Verantwortlichen ueber beabsichtigte Aenderungen mindestens 14 Tage im Voraus informiert. Der Verantwortliche kann Einspruch erheben.

(2) Aktuelle Unterauftragsverarbeiter:

{{UNTERAUFTRAGSVERARBEITER_LISTE}}

(3) Der Auftragsverarbeiter stellt vertraglich sicher, dass die Unterauftragsverarbeiter dieselben Datenschutzpflichten einhalten.

{{#IF DRITTLANDTRANSFER_DETAILS}}
---

## §8 Uebermittlung in Drittlaender

(1) Eine Uebermittlung personenbezogener Daten in Drittlaender erfolgt nur unter Einhaltung der Voraussetzungen der Art. 44-49 DS-GVO.

(2) Details:

{{DRITTLANDTRANSFER_DETAILS}}
{{/IF}}

---

## §9 Kontrollrechte und Audits

(1) Der Verantwortliche hat das Recht, die Einhaltung der Vorschriften durch den Auftragsverarbeiter zu ueberpruefen. Dies umfasst Inspektionen vor Ort, Dokumentenpruefungen und die Einholung von Auskuenften.

(2) Der Auftragsverarbeiter unterstuetzt den Verantwortlichen bei der Durchfuehrung und gewaehrt Zugang zu relevanten Raeumlichkeiten und Systemen mit angemessener Vorankuendigung (in der Regel 14 Tage).

(3) Alternativ kann der Auftragsverarbeiter aktuelle Zertifizierungen (z. B. ISO 27001, SOC 2) oder Auditberichte unabhaengiger Pruefer vorlegen.

---

## §10 Meldung von Datenpannen

(1) Der Auftragsverarbeiter informiert den Verantwortlichen unverzueglich (in der Regel innerhalb von 24 Stunden) nach Kenntniserlangung ueber eine Verletzung des Schutzes personenbezogener Daten (Art. 33 Abs. 2 DS-GVO).

(2) Die Meldung umfasst mindestens die Art der Datenpanne, die betroffenen Kategorien und ungefaehre Anzahl der Betroffenen, die wahrscheinlichen Folgen und die ergriffenen Gegenmassnahmen.

---

## §11 Haftung

Die Haftung richtet sich nach Art. 82 DS-GVO. Der Auftragsverarbeiter haftet fuer Schaeden, die durch eine nicht den Vorgaben der DS-GVO entsprechende Verarbeitung oder durch Handeln entgegen den Weisungen des Verantwortlichen verursacht wurden.

---

## §12 Laufzeit und Kuendigung

(1) Dieser AVV tritt mit Unterzeichnung in Kraft und endet automatisch mit Beendigung des Hauptvertrags.

(2) Eine ausserordentliche Kuendigung ist bei schwerem Verstoss gegen diesen Vertrag oder datenschutzrechtliche Vorschriften moeglich.

(3) Nach Vertragsende hat der Auftragsverarbeiter alle personenbezogenen Daten gemaess §6 Abs. 7 zu loeschen oder zurueckzugeben.

---

## §13 Schlussbestimmungen

(1) Aenderungen dieses Vertrags beduerfen der Schriftform.

(2) Sollten einzelne Bestimmungen unwirksam sein, bleibt die Wirksamkeit des uebrigen Vertrags unberuehrt.

(3) Es gilt das Recht der Bundesrepublik Deutschland.

---

## Anlage 1: Technische und Organisatorische Massnahmen (TOM)

{{TOM_ANLAGE}}

---

## Unterschriften

| | Verantwortlicher | Auftragsverarbeiter |
|---|---|---|
| **Ort, Datum** | {{ORT_DATUM}} | {{ORT_DATUM}} |
| **Name** | {{VERANTWORTLICHER_VERTRETER}} | {{AUFTRAGSVERARBEITER_VERTRETER}} |
| **Unterschrift** | _________________ | _________________ |

---

*Erstellt mit BreakPilot Compliance. Lizenz: MIT.*
$template$
) ON CONFLICT DO NOTHING;
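
The `{{PLACEHOLDER}}` tokens declared in the `placeholders` column are plain substrings of `content`, so straightforward substitution already works in SQL. A minimal sketch: the values 'Muster GmbH' and 'Beispiel Cloud AG' are hypothetical, and the `{{#IF ...}}...{{/IF}}` blocks would still need an application-side renderer, which is not part of this migration set:

```sql
-- Sketch only: fill two AVV placeholders directly in SQL (example values).
SELECT replace(
         replace(content, '{{VERANTWORTLICHER_NAME}}', 'Muster GmbH'),
         '{{AUFTRAGSVERARBEITER_NAME}}', 'Beispiel Cloud AG'
       ) AS rendered
FROM compliance.compliance_legal_templates
WHERE document_type = 'dpa'
  AND status = 'published';
```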
249
document-templates/migrations/005_additional_templates.sql
Normal file
@@ -0,0 +1,249 @@
-- Migration 005: Zusaetzliche Templates — Verpflichtungserklaerung + Art. 13/14

-- 1. Verpflichtungserklaerung (Vertraulichkeit Mitarbeitende)
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'verpflichtungserklaerung',
  'Verpflichtungserklaerung auf das Datengeheimnis',
  'Vorlage zur Verpflichtung von Mitarbeitenden auf die Vertraulichkeit und das Datengeheimnis gemaess DS-GVO. Fuer Onboarding-Prozesse.',
  'de',
  'DE',
  '1.0',
  'published',
  'MIT',
  'BreakPilot Compliance',
  false,
  true,
  CAST('[
    "{{UNTERNEHMEN_NAME}}",
    "{{UNTERNEHMEN_ADRESSE}}",
    "{{MITARBEITER_NAME}}",
    "{{MITARBEITER_ABTEILUNG}}",
    "{{DSB_NAME}}",
    "{{DSB_KONTAKT}}",
    "{{ORT_DATUM}}",
    "{{SCHULUNGSDATUM}}"
  ]' AS jsonb),
$template$# Verpflichtung auf das Datengeheimnis
**gemaess Art. 28 Abs. 3 lit. b, Art. 29, Art. 32 Abs. 4 DS-GVO**

---

## 1. Verpflichtung

Ich, **{{MITARBEITER_NAME}}**, Abteilung **{{MITARBEITER_ABTEILUNG}}**, werde hiermit auf die Vertraulichkeit im Umgang mit personenbezogenen Daten verpflichtet.

**Arbeitgeber:** {{UNTERNEHMEN_NAME}}, {{UNTERNEHMEN_ADRESSE}}

Ich verpflichte mich, personenbezogene Daten, die mir im Rahmen meiner Taetigkeit bekannt werden, nur gemaess den erteilten Weisungen zu verarbeiten. Diese Verpflichtung gilt auch nach Beendigung des Beschaeftigungsverhaeltnisses fort.

---

## 2. Pflichten im Einzelnen

Mir ist bekannt, dass ich verpflichtet bin:

- Personenbezogene Daten nur im Rahmen meiner Aufgaben und nach Weisung des Verantwortlichen zu verarbeiten.
- Die Vertraulichkeit personenbezogener Daten zu wahren und diese nicht unbefugt an Dritte weiterzugeben.
- Personenbezogene Daten vor unbefugtem Zugriff, Verlust und Missbrauch zu schuetzen.
- Den Datenschutzbeauftragten unverzueglich ueber Datenschutzvorfaelle oder -verletzungen zu informieren.
- Keine personenbezogenen Daten fuer private Zwecke zu verwenden.
- Mobile Datentraeger und Zugangsmedien sorgfaeltig aufzubewahren.
- Passwoerter nicht weiterzugeben und regelmaessig zu aendern.

---

## 3. Rechtsfolgen bei Verstoss

Ein Verstoss gegen das Datengeheimnis kann folgende Konsequenzen haben:

- **Arbeitsrechtliche Massnahmen** bis hin zur fristlosen Kuendigung
- **Schadensersatzansprueche** des Arbeitgebers oder der Betroffenen (Art. 82 DS-GVO)
- **Ordnungswidrigkeiten oder Straftaten** nach BDSG und StGB (§§ 42, 43 BDSG; §§ 201-206 StGB)

---

## 4. Datenschutzschulung

{{#IF SCHULUNGSDATUM}}
Ich habe am **{{SCHULUNGSDATUM}}** eine Datenschutzschulung erhalten und wurde ueber die wesentlichen Grundsaetze der DS-GVO unterrichtet.
{{/IF}}
{{#IF_NOT SCHULUNGSDATUM}}
Eine Datenschutzschulung wird im Rahmen des Onboarding durchgefuehrt.
{{/IF_NOT}}

---

## 5. Ansprechpartner

Bei Fragen zum Datenschutz wende ich mich an den Datenschutzbeauftragten:
**{{DSB_NAME}}** — {{DSB_KONTAKT}}

---

## 6. Bestaetigung

Ich habe diese Verpflichtungserklaerung gelesen und verstanden. Ich bin mir meiner Pflichten bewusst.

| | Mitarbeitende/r | Arbeitgeber |
|---|---|---|
| **Ort, Datum** | {{ORT_DATUM}} | {{ORT_DATUM}} |
| **Name** | {{MITARBEITER_NAME}} | |
| **Unterschrift** | _________________ | _________________ |

---

*Erstellt mit BreakPilot Compliance. Lizenz: MIT.*
$template$
) ON CONFLICT DO NOTHING;

-- 2. Art. 13/14 Informationspflichten-Muster
INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'informationspflichten',
  'Informationspflichten gemaess Art. 13/14 DS-GVO',
  'Mustertext fuer Datenschutzhinweise nach Art. 13 (Direkterhebung) und Art. 14 (Dritterhebung) DS-GVO. Mit bedingten Bloecken fuer beide Varianten.',
  'de',
  'EU/DSGVO',
  '1.0',
  'published',
  'MIT',
  'BreakPilot Compliance',
  false,
  true,
  CAST('[
    "{{VERANTWORTLICHER_NAME}}",
    "{{VERANTWORTLICHER_ADRESSE}}",
    "{{VERANTWORTLICHER_KONTAKT}}",
    "{{DSB_NAME}}",
    "{{DSB_KONTAKT}}",
    "{{VERARBEITUNGSZWECK}}",
    "{{RECHTSGRUNDLAGE}}",
    "{{BERECHTIGTES_INTERESSE}}",
    "{{DATENKATEGORIEN}}",
    "{{DATENQUELLE}}",
    "{{EMPFAENGER}}",
    "{{DRITTLANDTRANSFER}}",
    "{{SPEICHERDAUER}}",
    "{{AUFSICHTSBEHOERDE}}",
    "{{AUTOMATISIERTE_ENTSCHEIDUNG}}",
    "{{PFLICHT_ODER_FREIWILLIG}}"
  ]' AS jsonb),
$template$# Datenschutzhinweise
**gemaess Art. 13 und Art. 14 der Datenschutz-Grundverordnung (DS-GVO)**

---

## 1. Verantwortlicher

{{VERANTWORTLICHER_NAME}}
{{VERANTWORTLICHER_ADRESSE}}
Kontakt: {{VERANTWORTLICHER_KONTAKT}}

{{#IF DSB_NAME}}
## 2. Datenschutzbeauftragter

{{DSB_NAME}}
{{DSB_KONTAKT}}
{{/IF}}

---

## 3. Zweck und Rechtsgrundlage der Verarbeitung

Wir verarbeiten Ihre personenbezogenen Daten zu folgenden Zwecken:

{{VERARBEITUNGSZWECK}}

**Rechtsgrundlage:** {{RECHTSGRUNDLAGE}}

{{#IF BERECHTIGTES_INTERESSE}}
**Berechtigtes Interesse (Art. 6 Abs. 1 lit. f DS-GVO):** {{BERECHTIGTES_INTERESSE}}
{{/IF}}

---

## 4. Kategorien personenbezogener Daten

{{DATENKATEGORIEN}}

{{#IF DATENQUELLE}}
## 5. Herkunft der Daten (Art. 14 DS-GVO)

Die Daten wurden nicht bei Ihnen direkt erhoben, sondern stammen aus folgender Quelle:

{{DATENQUELLE}}
{{/IF}}

---

## 6. Empfaenger und Uebermittlung

Ihre Daten werden an folgende Empfaenger bzw. Kategorien von Empfaengern uebermittelt:

{{EMPFAENGER}}

{{#IF DRITTLANDTRANSFER}}
### Uebermittlung in Drittlaender

{{DRITTLANDTRANSFER}}
{{/IF}}

---

## 7. Speicherdauer

{{SPEICHERDAUER}}

---

## 8. Ihre Rechte

Sie haben gegenueber dem Verantwortlichen folgende Rechte hinsichtlich Ihrer personenbezogenen Daten:

- **Auskunftsrecht** (Art. 15 DS-GVO): Sie koennen Auskunft ueber die gespeicherten Daten verlangen.
- **Berichtigungsrecht** (Art. 16 DS-GVO): Sie koennen die Berichtigung unrichtiger Daten verlangen.
- **Loeschungsrecht** (Art. 17 DS-GVO): Sie koennen die Loeschung Ihrer Daten verlangen, sofern keine Aufbewahrungspflicht besteht.
- **Einschraenkung** (Art. 18 DS-GVO): Sie koennen die Einschraenkung der Verarbeitung verlangen.
- **Datenuebertragbarkeit** (Art. 20 DS-GVO): Sie koennen Ihre Daten in einem strukturierten, maschinenlesbaren Format erhalten.
- **Widerspruchsrecht** (Art. 21 DS-GVO): Sie koennen der Verarbeitung widersprechen, insbesondere bei Direktwerbung.

{{#IF RECHTSGRUNDLAGE}}
- **Widerrufsrecht** (Art. 7 Abs. 3 DS-GVO): Sofern die Verarbeitung auf Einwilligung beruht, koennen Sie diese jederzeit widerrufen, ohne dass die Rechtmaessigkeit der bis dahin erfolgten Verarbeitung beruehrt wird.
{{/IF}}

---

## 9. Beschwerderecht

Sie haben das Recht, sich bei einer Aufsichtsbehoerde zu beschweren:

{{AUFSICHTSBEHOERDE}}

---

{{#IF AUTOMATISIERTE_ENTSCHEIDUNG}}
## 10. Automatisierte Entscheidungsfindung (Art. 22 DS-GVO)

{{AUTOMATISIERTE_ENTSCHEIDUNG}}
{{/IF}}

{{#IF PFLICHT_ODER_FREIWILLIG}}
## 11. Bereitstellung der Daten

{{PFLICHT_ODER_FREIWILLIG}}
{{/IF}}

---

*Stand: Siehe Versionsdatum des Dokuments. Erstellt mit BreakPilot Compliance. Lizenz: MIT.*
$template$
) ON CONFLICT DO NOTHING;
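
Because `placeholders` is stored as a jsonb array, the fields a document still expects can be listed without parsing the template text. A small sketch using the standard PostgreSQL function `jsonb_array_elements_text`:

```sql
-- List the placeholders each template from this migration expects.
SELECT title,
       jsonb_array_elements_text(placeholders) AS placeholder
FROM compliance.compliance_legal_templates
WHERE document_type IN ('verpflichtungserklaerung', 'informationspflichten');
```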
@@ -0,0 +1,350 @@
-- Migration 006: Betriebsvereinbarung Template V1
-- Modulare Vorlage fuer Betriebsvereinbarungen zu KI/IT-Systemen
-- Rechtsgrundlage: §87 Abs.1 Nr.6 BetrVG, DSGVO, BDSG

INSERT INTO compliance.compliance_legal_templates (
  tenant_id, document_type, title, description, language, jurisdiction,
  version, status, license_name, source_name, attribution_required,
  is_complete_document, placeholders, content
) VALUES (
  '9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
  'betriebsvereinbarung',
  'Betriebsvereinbarung — Einfuehrung und Nutzung von KI-/IT-Systemen',
  'Modulare Vorlage fuer eine Betriebsvereinbarung gemaess §87 Abs.1 Nr.6 BetrVG zur Einfuehrung und Nutzung von IT-Systemen und KI-Anwendungen. Umfasst Datenschutz, Ueberwachungsschutz, Change-Management und Kontrollrechte des Betriebsrats. Basiert auf BAG-Rechtsprechung zu Microsoft 365, SAP ERP und Standardsoftware.',
  'de',
  'DE',
  '1.0',
  'published',
  'MIT',
  'BreakPilot Compliance',
  false,
  true,
  CAST('[
    "{{UNTERNEHMEN_NAME}}",
    "{{UNTERNEHMEN_SITZ}}",
    "{{ARBEITGEBER_VERTRETER}}",
    "{{BETRIEBSRAT_VORSITZ}}",
    "{{SYSTEM_NAME}}",
    "{{SYSTEM_BESCHREIBUNG}}",
    "{{SYSTEM_HERSTELLER}}",
    "{{GELTUNGSBEREICH_STANDORTE}}",
    "{{GELTUNGSBEREICH_BEREICHE}}",
    "{{GELTUNGSBEREICH_MODULE}}",
    "{{ZWECK_BESCHREIBUNG}}",
    "{{DATENARTEN_LISTE}}",
    "{{VERBOTENE_NUTZUNGEN}}",
    "{{ROLLEN_ADMIN}}",
    "{{ROLLEN_FUEHRUNGSKRAFT}}",
    "{{ROLLEN_REPORTING}}",
    "{{TRANSPARENZ_INFO}}",
    "{{ERLAUBTE_REPORTS}}",
    "{{SPEICHERFRIST_AUDIT_LOGS}}",
    "{{SPEICHERFRIST_NUTZUNGSDATEN}}",
    "{{SPEICHERFRIST_CHAT_PROMPTS}}",
    "{{TOM_MASSNAHMEN}}",
    "{{CHANGE_MANAGEMENT_PROZESS}}",
    "{{AUDIT_INTERVALL}}",
    "{{BESCHWERDE_ANSPRECHPARTNER}}",
    "{{LAUFZEIT}}",
    "{{KUENDIGUNGSFRIST}}",
    "{{DATUM_UNTERZEICHNUNG}}",
    "{{DSB_NAME}}",
    "{{DSB_KONTAKT}}"
  ]' AS jsonb),
$template$# Betriebsvereinbarung

**ueber die Einfuehrung und Nutzung von {{SYSTEM_NAME}}**

zwischen

**{{UNTERNEHMEN_NAME}}**, {{UNTERNEHMEN_SITZ}},
vertreten durch {{ARBEITGEBER_VERTRETER}}
(nachfolgend "Arbeitgeberin")

und dem

**Betriebsrat** der {{UNTERNEHMEN_NAME}},
vertreten durch den/die Vorsitzende/n {{BETRIEBSRAT_VORSITZ}}
(nachfolgend "Betriebsrat")

---

## A. Praeambel und Rechtsgrundlagen

Diese Betriebsvereinbarung regelt die Einfuehrung und Nutzung von **{{SYSTEM_NAME}}** ({{SYSTEM_BESCHREIBUNG}}) im Betrieb der {{UNTERNEHMEN_NAME}}.

**Rechtsgrundlagen:**
- §87 Abs.1 Nr.6 BetrVG (Mitbestimmung bei technischen Ueberwachungseinrichtungen)
- §90 BetrVG (Unterrichtung bei Planung technischer Anlagen)
- Art. 5, 6, 32 DSGVO (Datenschutzgrundsaetze, Rechtsgrundlage, TOM)
- §26 BDSG (Beschaeftigtendatenschutz)
{{#IF AI_SYSTEM}}
- Verordnung (EU) 2024/1689 (KI-Verordnung / AI Act)
{{/IF}}

Die Parteien sind sich einig, dass {{SYSTEM_NAME}} eine technische Einrichtung im Sinne des §87 Abs.1 Nr.6 BetrVG darstellt, die geeignet ist, das Verhalten oder die Leistung der Beschaeftigten zu ueberwachen. Die Einigung erfolgt in Kenntnis der Rechtsprechung des Bundesarbeitsgerichts (vgl. BAG 1 ABR 20/21 — Microsoft Office 365; BAG 1 ABN 36/18 — Standardsoftware).

---

## B. Geltungsbereich

### B.1 Raeumlicher Geltungsbereich
Diese Betriebsvereinbarung gilt fuer folgende Standorte:
{{GELTUNGSBEREICH_STANDORTE}}

### B.2 Persoenlicher Geltungsbereich
Die Betriebsvereinbarung gilt fuer alle Beschaeftigten der folgenden Bereiche:
{{GELTUNGSBEREICH_BEREICHE}}

### B.3 Sachlicher Geltungsbereich
Die Betriebsvereinbarung umfasst folgende Module und Dienste des Systems:
{{GELTUNGSBEREICH_MODULE}}

{{#IF SYSTEM_HERSTELLER}}
**Systemhersteller/-anbieter:** {{SYSTEM_HERSTELLER}}
{{/IF}}

---

## C. Zweckbestimmung

### C.1 Erlaubte Nutzungszwecke
{{SYSTEM_NAME}} darf ausschliesslich zu folgenden Zwecken eingesetzt werden:
{{ZWECK_BESCHREIBUNG}}

### C.2 Verbotene Nutzungen
Folgende Nutzungen sind ausdruecklich untersagt:

{{VERBOTENE_NUTZUNGEN}}

Darueber hinaus ist generell untersagt:
- Verdeckte Leistungs- oder Verhaltenskontrolle einzelner Beschaeftigter
- Erstellung individueller Persoenlichkeitsprofile
- Nutzung von Prompt-, Chat- oder Nutzungshistorien zu disziplinarischen Zwecken
- Automatisierte Personalentscheidungen ohne menschliche Ueberpruefung
- Personenbezogene Rankings oder Leistungsvergleiche ohne gesonderte Mitbestimmung
{{#IF AI_SYSTEM}}
- Einsatz von KI-Funktionen zur biometrischen Echtzeit-Identifizierung
- KI-gestuetztes Social Scoring von Beschaeftigten
{{/IF}}

---

## D. Datenarten und Verarbeitungszwecke

### D.1 Verarbeitete Datenarten
Im Rahmen der Nutzung von {{SYSTEM_NAME}} werden folgende Datenarten verarbeitet:
{{DATENARTEN_LISTE}}

### D.2 Rechtsgrundlage
Die Verarbeitung der Beschaeftigtendaten erfolgt auf Grundlage von:
- §26 Abs.1 BDSG i.V.m. Art. 6 Abs.1 lit. b DSGVO (Durchfuehrung des Arbeitsverhaeltnisses)
- §26 Abs.4 BDSG i.V.m. Art. 88 DSGVO (diese Betriebsvereinbarung als Kollektivvereinbarung)

### D.3 Keine Verarbeitung besonderer Kategorien
Daten gemaess Art. 9 DSGVO (Gesundheitsdaten, Gewerkschaftszugehoerigkeit, biometrische Daten etc.) werden nicht verarbeitet, es sei denn, dies ist in einem gesonderten Anhang zu dieser Betriebsvereinbarung ausdruecklich geregelt.

---

## E. Rollen- und Zugriffskonzept

### E.1 Administratoren
{{ROLLEN_ADMIN}}

### E.2 Fuehrungskraefte
{{ROLLEN_FUEHRUNGSKRAFT}}

Fuehrungskraefte erhalten **keinen** Zugriff auf:
- individuelle Nutzungsprotokolle
- Prompt-/Chat-Historien einzelner Beschaeftigter
- Produktivitaetskennzahlen auf Personenebene

### E.3 Reporting-Zugriff
{{ROLLEN_REPORTING}}

### E.4 Vier-Augen-Prinzip
Sonderauswertungen mit Personenbezug beduerfen:
- der Zustimmung des Betriebsrats
- der Beteiligung des Datenschutzbeauftragten ({{DSB_NAME}}, {{DSB_KONTAKT}})
- einer dokumentierten Begruendung

---

## F. Transparenz gegenueber Beschaeftigten

Die Arbeitgeberin informiert alle Beschaeftigten vor Einfuehrung von {{SYSTEM_NAME}} ueber:
{{TRANSPARENZ_INFO}}

Insbesondere:
- Welche Daten verarbeitet werden
- Welche KI-Funktionen aktiviert sind
- Welche Protokollierung stattfindet
- Wer Zugriff auf welche Daten hat
- Wie lange Daten gespeichert werden
- An wen sich Beschaeftigte bei Fragen oder Beschwerden wenden koennen

{{#IF AI_SYSTEM}}
Bei KI-gestuetzten Funktionen wird zusaetzlich transparent gemacht:
- Ob und wie KI-generierte Inhalte gekennzeichnet werden
- Ob Eingaben fuer Modelltraining verwendet werden (Standard: Nein)
- Welche Entscheidungsunterstuetzung die KI leistet
{{/IF}}

---

## G. Auswertungen und Reports

### G.1 Erlaubte Reports
Folgende Auswertungen sind ohne gesonderte Zustimmung zulaessig:
{{ERLAUBTE_REPORTS}}

### G.2 Unzulaessige Reports
Ohne ausdrueckliche, vorherige Zustimmung des Betriebsrats sind unzulaessig:
- individuelle Produktivitaetsreports
- Teamvergleiche mit Personenbezug
- Verhaltensprofile oder Nutzungsmuster einzelner Beschaeftigter
- Rankinglisten (auch anonymisierte, wenn Re-Identifikation moeglich)
- Korrelation von Nutzungsdaten mit Leistungsbeurteilungen

### G.3 Neue Reporttypen
Die Einfuehrung neuer Reporttypen bedarf der vorherigen Zustimmung des Betriebsrats.

---

## H. Speicher- und Loeschfristen

| Datenkategorie | Speicherfrist | Loeschverfahren |
|----------------|---------------|-----------------|
| Audit-/Admin-Logs | {{SPEICHERFRIST_AUDIT_LOGS}} | Automatische Loeschung |
| Nutzungsdaten (aggregiert) | {{SPEICHERFRIST_NUTZUNGSDATEN}} | Automatische Loeschung |
| Prompt-/Chat-Historien | {{SPEICHERFRIST_CHAT_PROMPTS}} | Automatische Loeschung oder deaktiviert |
| Exportdateien | 30 Tage | Automatische Loeschung |

Die Speicherdauer der Audit-Logs orientiert sich am berechtigten Interesse der Arbeitgeberin an der Systemsicherheit und wird auf das erforderliche Minimum begrenzt.

{{#IF AI_SYSTEM}}
**KI-spezifisch:**
- Trainingsdaten aus Beschaeftigten-Interaktionen: **nicht zulaessig** ohne gesonderte Vereinbarung
- Feedback-Daten zur Modellverbesserung: nur anonymisiert und aggregiert
{{/IF}}

---

## I. Technische und organisatorische Massnahmen (TOM)

Zum Schutz der Beschaeftigtendaten werden folgende Massnahmen umgesetzt:

{{TOM_MASSNAHMEN}}

Ergaenzend gelten mindestens:
- Rollen- und Rechtekonzept mit Least-Privilege-Prinzip
- Verschluesselung der Daten bei Uebertragung und Speicherung
- Protokollierung aller administrativen Zugriffe
- Pseudonymisierung, wo technisch moeglich
- Deaktivierung nicht benoetigter Telemetrie- und Diagnosefunktionen
- Getrennte Umgebungen fuer Test und Produktion

---

## J. Change-Management

### J.1 Aenderungspflicht
Folgende Aenderungen an {{SYSTEM_NAME}} beduerfen der vorherigen Information und ggf. erneuten Mitbestimmung des Betriebsrats:

{{CHANGE_MANAGEMENT_PROZESS}}

Insbesondere:
- Aktivierung neuer Module oder Funktionen
- Anbindung neuer Datenquellen oder Konnektoren
- Aenderung der Reporting-Funktionalitaet
- Updates mit neuen KI-Modellen oder -Funktionen
- Aenderung der Datenverarbeitungsstandorte
- Erweiterung des Nutzerkreises

### J.2 Informationsfrist
Die Arbeitgeberin informiert den Betriebsrat mindestens **14 Kalendertage** vor geplanten Aenderungen schriftlich. Bei sicherheitskritischen Updates kann die Frist auf 3 Werktage verkuerzt werden.

### J.3 Bewertungsverfahren
Jede Aenderung wird anhand folgender Kriterien bewertet:
- Aendert sich die Ueberwachungseignung?
- Werden neue Datenarten verarbeitet?
- Aendert sich der Personenbezug?

Bei positiver Beantwortung einer dieser Fragen ist eine erneute Mitbestimmung erforderlich.

---

## K. Kontroll- und Audit-Rechte des Betriebsrats

### K.1 Laufende Kontrolle
Der Betriebsrat hat das Recht auf:
- Einsicht in die Systemdokumentation
- Einsicht in den Katalog aktiver Reports und Auswertungen
- Information ueber alle Administrationszugriffe
- Teilnahme an Schulungen zum System

### K.2 Regelmaessige Reviews
Arbeitgeberin und Betriebsrat fuehren alle **{{AUDIT_INTERVALL}}** einen gemeinsamen Review durch. Gegenstand:
- Aktuelle Nutzung und Funktionsumfang
- Eingehaltene/verletzte Regelungen
- Eingegangene Beschwerden
- Geplante Aenderungen
- Aktualitaet der TOM

### K.3 Anlassbezogene Pruefung
Bei begruendetem Verdacht auf Verstoss gegen diese Betriebsvereinbarung kann der Betriebsrat jederzeit eine Sonderpruefung verlangen. Die Arbeitgeberin stellt innerhalb von 5 Werktagen die angeforderten Informationen bereit.

---

## L. Beschwerden und Eskalation

### L.1 Beschwerderecht
Beschaeftigte koennen sich bei Bedenken hinsichtlich der Datenverarbeitung wenden an:
{{BESCHWERDE_ANSPRECHPARTNER}}

### L.2 Eskalation
Bei Meinungsverschiedenheiten ueber die Auslegung oder Anwendung dieser Betriebsvereinbarung gilt:
1. Gespraech zwischen Arbeitgeberin und Betriebsrat (Frist: 2 Wochen)
2. Hinzuziehung des Datenschutzbeauftragten
3. Einigungsstelle gemaess §76 BetrVG

### L.3 Sofortmassnahmen
Bei schwerwiegenden Verstoessen (insbesondere unzulaessige Ueberwachung, Datenmissbrauch) kann der Betriebsrat die sofortige Aussetzung der betroffenen Funktion verlangen. Die Arbeitgeberin setzt die Funktion bis zur Klaerung aus.

---

## M. Schlussbestimmungen

### M.1 Inkrafttreten und Laufzeit
Diese Betriebsvereinbarung tritt am {{DATUM_UNTERZEICHNUNG}} in Kraft und gilt fuer die Dauer von {{LAUFZEIT}}.

### M.2 Kuendigung
Die Betriebsvereinbarung kann von jeder Seite mit einer Frist von {{KUENDIGUNGSFRIST}} zum Monatsende schriftlich gekuendigt werden.

### M.3 Nachwirkung
Die Betriebsvereinbarung wirkt nach Kuendigung bis zum Abschluss einer neuen Vereinbarung nach (§77 Abs.6 BetrVG).

### M.4 Salvatorische Klausel
Sollten einzelne Bestimmungen unwirksam sein, bleibt die Wirksamkeit der uebrigen Bestimmungen unberuehrt. Die Parteien verpflichten sich, unwirksame Bestimmungen durch wirksame zu ersetzen, die dem wirtschaftlichen Zweck am naechsten kommen.

### M.5 Anlagen
Folgende Anlagen sind Bestandteil dieser Betriebsvereinbarung:
- Anlage 1: Detaillierte Systemdokumentation
- Anlage 2: Rollen- und Rechtekonzept
- Anlage 3: TOM-Dokumentation
- Anlage 4: Reportkatalog
{{#IF AI_SYSTEM}}
- Anlage 5: KI-Transparenzbericht
{{/IF}}

---

**{{UNTERNEHMEN_SITZ}}, den {{DATUM_UNTERZEICHNUNG}}**

| | |
|---|---|
| _________________________ | _________________________ |
| {{ARBEITGEBER_VERTRETER}} | {{BETRIEBSRAT_VORSITZ}} |
| fuer die Arbeitgeberin | fuer den Betriebsrat |
$template$
) ON CONFLICT DO NOTHING;
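
All of these migrations rely on `ON CONFLICT DO NOTHING` for idempotency, which only turns a re-run into a no-op if a unique constraint actually covers the inserted rows; the constraint itself is not part of this diff. A hypothetical key, purely as an assumption sketch:

```sql
-- Assumption, not shown in this diff: without a unique constraint like this,
-- ON CONFLICT DO NOTHING never fires and re-running a migration duplicates rows.
CREATE UNIQUE INDEX IF NOT EXISTS compliance_legal_templates_seed_key
  ON compliance.compliance_legal_templates (tenant_id, document_type, title, version);
```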
330
document-templates/migrations/007_fria_template.sql
Normal file
@@ -0,0 +1,330 @@
|
||||
-- Migration 007: FRIA Template V1 — Grundrechte-Folgenabschaetzung (Art. 27 KI-VO)
|
||||
-- Fundamental Rights Impact Assessment fuer Hochrisiko-KI-Systeme
|
||||
-- Rechtsgrundlage: Art. 27 Verordnung (EU) 2024/1689 (KI-Verordnung / AI Act)
|
||||
|
||||
INSERT INTO compliance.compliance_legal_templates (
|
||||
tenant_id, document_type, title, description, language, jurisdiction,
|
||||
version, status, license_name, source_name, attribution_required,
|
||||
is_complete_document, placeholders, content
|
||||
) VALUES (
|
||||
'9282a473-5c95-4b3a-bf78-0ecc0ec71d3e'::uuid,
|
||||
'fria',
|
||||
'Grundrechte-Folgenabschaetzung (FRIA) gemaess Art. 27 KI-Verordnung',
|
||||
'Vorlage fuer eine Grundrechte-Folgenabschaetzung (Fundamental Rights Impact Assessment) gemaess Art. 27 der Verordnung (EU) 2024/1689 (KI-Verordnung). Erforderlich fuer Hochrisiko-KI-Systeme, insbesondere bei oeffentlichen Stellen und in den Bereichen Beschaeftigung, Bildung und Zugang zu wesentlichen Dienstleistungen.',
|
||||
'de',
|
||||
'EU/KI-VO',
|
||||
'1.0',
|
||||
'published',
|
||||
'MIT',
|
||||
'BreakPilot Compliance',
|
||||
false,
|
||||
true,
|
||||
CAST('[
|
||||
"{{ORGANISATION_NAME}}",
|
||||
"{{ORGANISATION_ADRESSE}}",
|
||||
"{{VERANTWORTLICHER}}",
|
||||
"{{ERSTELLT_VON}}",
|
||||
"{{ERSTELLT_AM}}",
|
||||
"{{SYSTEM_NAME}}",
|
||||
"{{SYSTEM_VERSION}}",
|
||||
"{{SYSTEM_BESCHREIBUNG}}",
|
||||
"{{SYSTEM_ANBIETER}}",
|
||||
"{{EINSATZZWECK}}",
|
||||
"{{EINSATZKONTEXT}}",
|
||||
"{{BETROFFENE_GRUPPEN}}",
|
||||
"{{BETROFFENE_ANZAHL}}",
|
||||
"{{GRUNDRECHTE_ANALYSE}}",
|
||||
"{{RISIKOMATRIX}}",
|
||||
"{{MASSNAHMEN_LISTE}}",
|
||||
"{{HUMAN_OVERSIGHT_BESCHREIBUNG}}",
|
||||
"{{TRANSPARENZ_MASSNAHMEN}}",
|
||||
"{{KONSULTATION_ERGEBNISSE}}",
|
||||
"{{GENEHMIGT_VON}}",
|
||||
"{{GENEHMIGT_AM}}",
|
||||
"{{NAECHSTE_UEBERPRUEFUNG}}",
|
||||
"{{DSB_NAME}}",
|
||||
"{{DSB_KONTAKT}}",
|
||||
"{{AI_ACT_KLASSIFIKATION}}",
|
||||
"{{ANNEX_III_KATEGORIE}}"
|
||||
]' AS jsonb),
|
||||
$template$# Grundrechte-Folgenabschaetzung (FRIA)

**gemaess Art. 27 der Verordnung (EU) 2024/1689 (KI-Verordnung)**

---

| Feld | Wert |
|------|------|
| Organisation | {{ORGANISATION_NAME}} |
| Adresse | {{ORGANISATION_ADRESSE}} |
| KI-System | {{SYSTEM_NAME}} (Version {{SYSTEM_VERSION}}) |
| Erstellt von | {{ERSTELLT_VON}} |
| Erstellt am | {{ERSTELLT_AM}} |
| Status | Entwurf |

---

## 1. Systembeschreibung und Einsatzkontext

### 1.1 KI-System

**Systemname:** {{SYSTEM_NAME}}
**Version:** {{SYSTEM_VERSION}}
**Anbieter:** {{SYSTEM_ANBIETER}}
**Beschreibung:** {{SYSTEM_BESCHREIBUNG}}

### 1.2 AI Act Klassifikation

**Risikoklasse:** {{AI_ACT_KLASSIFIKATION}}
{{#IF ANNEX_III_KATEGORIE}}
**Annex III Kategorie:** {{ANNEX_III_KATEGORIE}}
{{/IF}}

### 1.3 Einsatzzweck

{{EINSATZZWECK}}

### 1.4 Einsatzkontext

{{EINSATZKONTEXT}}

Folgende Fragen sind zu beantworten:
- In welchem organisatorischen Kontext wird das System eingesetzt?
- Welche Entscheidungen werden durch das System unterstuetzt oder automatisiert?
- Wie haeufig wird das System eingesetzt?
- Welche Rolle spielt das System im Gesamtprozess?

### 1.5 Betroffene Personengruppen

{{BETROFFENE_GRUPPEN}}

**Geschaetzte Anzahl betroffener Personen:** {{BETROFFENE_ANZAHL}}

{{#IF BILDUNGSKONTEXT}}
**Besonderer Schutz:** Schueler, Studierende und Auszubildende geniessen als besonders schutzbeduerftiger Personenkreis erhoehten Schutz.
{{/IF}}

{{#IF HR_KONTEXT}}
**Besonderer Schutz:** Beschaeftigte und Bewerber befinden sich in einem Abhaengigkeitsverhaeltnis und beduerfen besonderen Schutzes vor diskriminierenden KI-Entscheidungen.
{{/IF}}

---

## 2. Grundrechte-Mapping

### 2.1 Betroffene Grundrechte

Die folgenden Grundrechte der EU-Grundrechtecharta und des Grundgesetzes wurden auf Betroffenheit geprueft:

{{GRUNDRECHTE_ANALYSE}}

### 2.2 Referenz-Grundrechte

| Nr. | Grundrecht | EU-Charta | GG | Betroffen | Begruendung |
|-----|-----------|-----------|-----|-----------|-------------|
| 1 | Menschenwuerde | Art. 1 | Art. 1 | | |
| 2 | Recht auf Privatsphaere | Art. 7 | Art. 2 Abs. 1 | | |
| 3 | Schutz personenbezogener Daten | Art. 8 | Art. 2 Abs. 1 i.V.m. Art. 1 Abs. 1 | | |
| 4 | Nicht-Diskriminierung | Art. 21 | Art. 3 | | |
| 5 | Gleichheit von Frauen und Maennern | Art. 23 | Art. 3 Abs. 2 | | |
| 6 | Rechte des Kindes | Art. 24 | Art. 6 Abs. 2 | | |
| 7 | Recht auf Bildung | Art. 14 | Art. 12 | | |
| 8 | Berufsfreiheit / Recht zu arbeiten | Art. 15 | Art. 12 | | |
| 9 | Recht auf wirksamen Rechtsbehelf | Art. 47 | Art. 19 Abs. 4 | | |
| 10 | Meinungs- und Informationsfreiheit | Art. 11 | Art. 5 | | |
| 11 | Versammlungs- und Vereinigungsfreiheit | Art. 12 | Art. 8, 9 | | |
| 12 | Recht auf soziale Sicherheit | Art. 34 | Art. 20 | | |

{{#IF OEFFENTLICHE_STELLE}}

### 2.3 Besondere Pflichten oeffentlicher Stellen

Als oeffentliche Stelle gelten zusaetzliche Anforderungen:
- Erweiterte Transparenzpflicht gegenueber Buergern
- Pflicht zur Barrierefreiheit des Systems
- Beruecksichtigung des Gleichheitsgrundsatzes (Art. 3 GG)
- Demokratische Kontrolle und Rechenschaftspflicht
{{/IF}}

---

## 3. Risikoanalyse

### 3.1 Risikobewertung pro Grundrecht

Fuer jedes betroffene Grundrecht wird das Risiko bewertet:

**Eintrittswahrscheinlichkeit:**
- 1 = Sehr unwahrscheinlich
- 2 = Unwahrscheinlich
- 3 = Moeglich
- 4 = Wahrscheinlich
- 5 = Sehr wahrscheinlich

**Schadensausmass:**
- 1 = Geringfuegig
- 2 = Begrenzt
- 3 = Erheblich
- 4 = Schwerwiegend
- 5 = Katastrophal

### 3.2 Risikomatrix

{{RISIKOMATRIX}}

| Grundrecht | Risikoszenario | Wahrscheinlichkeit | Schwere | Risiko-Level | Begruendung |
|-----------|----------------|--------------------:|--------:|:------------:|-------------|
| | | | | | |

**Risiko-Level Berechnung:** Wahrscheinlichkeit × Schwere (Beispiel: Wahrscheinlichkeit 4 × Schwere 3 = 12 → Risiko-Level Mittel)

| Risiko-Level | Punktzahl | Bedeutung |
|:------------:|:---------:|-----------|
| Niedrig | 1-6 | Akzeptables Risiko, Standardmassnahmen |
| Mittel | 7-12 | Erhoehte Aufmerksamkeit, zusaetzliche Massnahmen |
| Hoch | 13-19 | Erhebliches Risiko, umfassende Massnahmen erforderlich |
| Kritisch | 20-25 | Nicht akzeptabel ohne fundamentale Aenderungen |

---

## 4. Massnahmen zur Risikominderung

### 4.1 Uebersicht der Massnahmen

{{MASSNAHMEN_LISTE}}

### 4.2 Human Oversight (Art. 14 KI-VO)

{{HUMAN_OVERSIGHT_BESCHREIBUNG}}

Folgende Massnahmen zur menschlichen Aufsicht werden umgesetzt:
- [ ] Mensch kann KI-Entscheidung jederzeit uebersteuern
- [ ] Mensch versteht KI-Output vollstaendig
- [ ] Keine automatisierten Entscheidungen ohne menschliche Ueberpruefung
- [ ] Schulung der Nutzer zu Systemgrenzen und Risiken
- [ ] Eingriffsprotokolle werden gefuehrt

### 4.3 Transparenz (Art. 13 KI-VO)

{{TRANSPARENZ_MASSNAHMEN}}

Folgende Transparenzmassnahmen werden umgesetzt:
- [ ] Betroffene werden ueber KI-Nutzung informiert
- [ ] KI-generierte Outputs sind als solche gekennzeichnet
- [ ] Erklaerbarkeit der Entscheidungslogik sichergestellt
- [ ] Kontaktmoeglichkeit fuer Betroffene vorhanden
- [ ] Informationen sind verstaendlich und zugaenglich

### 4.4 Logging und Audit (Art. 12 KI-VO)

- [ ] Alle Eingaben und Ausgaben werden protokolliert
- [ ] Logs sind manipulationssicher
- [ ] Aufbewahrungsfristen definiert
- [ ] Audit-Trail fuer Entscheidungsnachvollziehbarkeit

### 4.5 Bias-Pruefung und Nicht-Diskriminierung

- [ ] Trainingsdaten auf Bias geprueft
- [ ] Regelmaessige Bias-Audits geplant
- [ ] Beschwerdemechanismus fuer Diskriminierungsfaelle
{{#IF HR_KONTEXT}}
- [ ] AGG-konforme Gestaltung (kein Bias bei Geschlecht, Alter, Herkunft, Behinderung)
- [ ] Betriebsrat gemaess §95 BetrVG beteiligt (bei Auswahlrichtlinien)
{{/IF}}
{{#IF BILDUNGSKONTEXT}}
- [ ] Chancengleichheit unabhaengig von sozioekonomischem Hintergrund
- [ ] Keine Benachteiligung aufgrund von Sprachkenntnissen oder Behinderung
{{/IF}}

---

## 5. Konsultation

### 5.1 Einbeziehung Betroffener

{{KONSULTATION_ERGEBNISSE}}

Folgende Stakeholder wurden konsultiert:
- [ ] Datenschutzbeauftragter ({{DSB_NAME}}, {{DSB_KONTAKT}})
- [ ] Betroffene Personengruppen oder deren Vertreter
{{#IF HR_KONTEXT}}
- [ ] Betriebsrat / Personalrat
{{/IF}}
{{#IF OEFFENTLICHE_STELLE}}
- [ ] Buergervertreter / Ombudsstelle
- [ ] Zustaendige Aufsichtsbehoerde
{{/IF}}
- [ ] Fachexperten fuer betroffene Grundrechte

### 5.2 Ergebnisse der Konsultation

| Stakeholder | Datum | Ergebnis | Massnahme |
|------------|-------|----------|-----------|
| | | | |

---

## 6. Gesamtbewertung und Freigabe

### 6.1 Gesamtrisiko-Bewertung

| Kriterium | Bewertung |
|-----------|-----------|
| Hoechstes Einzelrisiko | |
| Anzahl betroffene Grundrechte | |
| Anzahl betroffene Personen | {{BETROFFENE_ANZAHL}} |
| Massnahmen ausreichend | Ja / Nein / Teilweise |
| Restrisiko akzeptabel | Ja / Nein |

### 6.2 Entscheidung

- [ ] **Freigabe** — Restrisiko akzeptabel, Massnahmen ausreichend
- [ ] **Freigabe mit Auflagen** — Zusaetzliche Massnahmen erforderlich (siehe unten)
- [ ] **Ablehnung** — Grundrechtsrisiken nicht auf ein akzeptables Mass mitigierbar

### 6.3 Auflagen (falls zutreffend)

| Nr. | Auflage | Frist | Verantwortlich |
|-----|---------|-------|----------------|
| | | | |

---

## 7. Laufende Ueberwachung

### 7.1 Naechste Ueberpruefung

**Geplante Ueberpruefung:** {{NAECHSTE_UEBERPRUEFUNG}}

### 7.2 Trigger fuer ausserplanmaessige Ueberpruefung

Eine erneute FRIA ist durchzufuehren bei:
- Wesentlicher Aenderung des KI-Systems oder seines Einsatzzwecks
- Erweiterung auf neue Personengruppen oder Anwendungsbereiche
- Beschwerden oder Vorfaellen mit Grundrechtsbezug
- Aenderung der Rechtsgrundlage oder Risikoklassifikation
- Neuen wissenschaftlichen Erkenntnissen zu Risiken
- Aenderung des KI-Modells oder der Trainingsdaten

### 7.3 Dokumentation und Archivierung

Diese FRIA wird mindestens fuer die Dauer des Einsatzes des KI-Systems und darueber hinaus fuer 10 Jahre archiviert (Art. 18 KI-VO).

---

## 8. Unterschriften

| | |
|---|---|
| _________________________ | _________________________ |
| {{ERSTELLT_VON}} | {{GENEHMIGT_VON}} |
| Erstellt am {{ERSTELLT_AM}} | Genehmigt am {{GENEHMIGT_AM}} |

---

**Anhang A:** Vollstaendige Systemdokumentation (Art. 11 KI-VO)
**Anhang B:** AI Act Decision Tree Ergebnis
**Anhang C:** Verknuepfte DSFA (falls vorhanden)
**Anhang D:** Konsultationsprotokolle
$template$
) ON CONFLICT DO NOTHING;
document-templates/scripts/cleanup_temp_vorlagen.py (new file, 137 lines)
@@ -0,0 +1,137 @@
#!/usr/bin/env python3
"""Cleanup script: Delete temporary DPA template documents from Qdrant.

Removes all points with payload field `temp_vorlagen=true` from
the bp_compliance_datenschutz collection.

Usage:
    python cleanup_temp_vorlagen.py --dry-run    # Preview only
    python cleanup_temp_vorlagen.py              # Execute deletion
    python cleanup_temp_vorlagen.py --qdrant-url http://localhost:6333
"""

import argparse
import json
import sys
from typing import Optional
from urllib.request import Request, urlopen
from urllib.error import URLError


def qdrant_request(base_url: str, method: str, path: str, body: Optional[dict] = None) -> dict:
    url = f"{base_url}{path}"
    data = json.dumps(body).encode() if body else None
    headers = {"Content-Type": "application/json"} if data else {}
    req = Request(url, data=data, headers=headers, method=method)
    with urlopen(req, timeout=30) as resp:
        return json.loads(resp.read())


def count_temp_vorlagen(base_url: str, collection: str) -> int:
    """Count points with temp_vorlagen=true."""
    body = {
        "filter": {
            "must": [
                {"key": "temp_vorlagen", "match": {"value": True}}
            ]
        },
        "exact": True,
    }
    result = qdrant_request(base_url, "POST", f"/collections/{collection}/points/count", body)
    return result.get("result", {}).get("count", 0)


def list_temp_regulation_ids(base_url: str, collection: str) -> list[dict]:
    """Get distinct regulation_ids of temp documents."""
    body = {
        "filter": {
            "must": [
                {"key": "temp_vorlagen", "match": {"value": True}}
            ]
        },
        "limit": 500,
        "with_payload": ["regulation_id", "title", "source"],
    }
    result = qdrant_request(base_url, "POST", f"/collections/{collection}/points/scroll", body)
    points = result.get("result", {}).get("points", [])

    seen = {}
    for p in points:
        payload = p.get("payload", {})
        rid = payload.get("regulation_id", "unknown")
        if rid not in seen:
            seen[rid] = {
                "regulation_id": rid,
                "title": payload.get("title", ""),
                "source": payload.get("source", ""),
            }
    return list(seen.values())


def delete_temp_vorlagen(base_url: str, collection: str) -> str:
    """Delete all points with temp_vorlagen=true; returns the Qdrant status string."""
    body = {
        "filter": {
            "must": [
                {"key": "temp_vorlagen", "match": {"value": True}}
            ]
        }
    }
    result = qdrant_request(base_url, "POST", f"/collections/{collection}/points/delete", body)
    status = result.get("status", "unknown")
    return status


def main():
    parser = argparse.ArgumentParser(description="Delete temp DPA templates from Qdrant")
    parser.add_argument("--qdrant-url", default="http://localhost:6333",
                        help="Qdrant URL (default: http://localhost:6333)")
    parser.add_argument("--collection", default="bp_compliance_datenschutz",
                        help="Qdrant collection name")
    parser.add_argument("--dry-run", action="store_true",
                        help="Only count and list, do not delete")
    args = parser.parse_args()

    print(f"Qdrant URL: {args.qdrant_url}")
    print(f"Collection: {args.collection}")
    print()

    try:
        count = count_temp_vorlagen(args.qdrant_url, args.collection)
    except URLError as e:
        print(f"ERROR: Cannot connect to Qdrant at {args.qdrant_url}: {e}")
        sys.exit(1)

    print(f"Gefundene Punkte mit temp_vorlagen=true: {count}")

    if count == 0:
        print("Nichts zu loeschen.")
        return

    docs = list_temp_regulation_ids(args.qdrant_url, args.collection)
    print(f"\nBetroffene Dokumente ({len(docs)}):")
    for doc in sorted(docs, key=lambda d: d["regulation_id"]):
        source = f" [{doc['source']}]" if doc.get("source") else ""
        title = f" — {doc['title']}" if doc.get("title") else ""
        print(f"  - {doc['regulation_id']}{title}{source}")

    if args.dry_run:
        print(f"\n[DRY-RUN] Wuerde {count} Punkte loeschen. Keine Aenderung durchgefuehrt.")
        return

    print(f"\nLoesche {count} Punkte ...")
    status = delete_temp_vorlagen(args.qdrant_url, args.collection)
    print(f"Status: {status}")

    remaining = count_temp_vorlagen(args.qdrant_url, args.collection)
    print(f"Verbleibende temp_vorlagen Punkte: {remaining}")

    if remaining == 0:
        print("Cleanup erfolgreich abgeschlossen.")
    else:
        print(f"WARNUNG: {remaining} Punkte konnten nicht geloescht werden.")


if __name__ == "__main__":
    main()
@@ -251,14 +251,251 @@ async def rerank_cohere(query: str, documents: List[str], top_k: int = 5) -> Lis
GERMAN_ABBREVIATIONS = {
    'bzw', 'ca', 'chr', 'd.h', 'dr', 'etc', 'evtl', 'ggf', 'inkl', 'max',
    'min', 'mio', 'mrd', 'nr', 'prof', 's', 'sog', 'u.a', 'u.ä', 'usw',
    'v.a', 'vgl', 'vs', 'z.b', 'z.t', 'zzgl'
    'v.a', 'vgl', 'vs', 'z.b', 'z.t', 'zzgl', 'abs', 'art', 'abschn',
    'anh', 'anl', 'aufl', 'bd', 'bes', 'bzgl', 'dgl', 'einschl', 'entspr',
    'erg', 'erl', 'gem', 'grds', 'hrsg', 'insb', 'ivm', 'kap', 'lit',
    'nachf', 'rdnr', 'rn', 'rz', 'ua', 'uvm', 'vorst', 'ziff'
}

# English abbreviations that don't end sentences
ENGLISH_ABBREVIATIONS = {
    'e.g', 'i.e', 'etc', 'vs', 'al', 'approx', 'avg', 'dept', 'dr', 'ed',
    'est', 'fig', 'govt', 'inc', 'jr', 'ltd', 'max', 'min', 'mr', 'mrs',
    'ms', 'no', 'prof', 'pt', 'ref', 'rev', 'sec', 'sgt', 'sr', 'st',
    'vol', 'cf', 'ch', 'cl', 'col', 'corp', 'cpl', 'def', 'dist', 'div',
    'gen', 'hon', 'illus', 'intl', 'natl', 'org', 'para', 'pp', 'repr',
    'resp', 'supp', 'tech', 'temp', 'treas', 'univ'
}

# Combined abbreviations for both languages
ALL_ABBREVIATIONS = GERMAN_ABBREVIATIONS | ENGLISH_ABBREVIATIONS

# Regex pattern for legal section headers (§, Art., Article, Section, etc.)
import re

_LEGAL_SECTION_RE = re.compile(
    r'^(?:'
    r'§\s*\d+'                     # § 25, § 5a
    r'|Art(?:ikel|icle|\.)\s*\d+'  # Artikel 5, Article 12, Art. 3
    r'|Section\s+\d+'              # Section 4.2
    r'|Abschnitt\s+\d+'            # Abschnitt 3
    r'|Kapitel\s+\d+'              # Kapitel 2
    r'|Chapter\s+\d+'              # Chapter 3
    r'|Anhang\s+[IVXLC\d]+'        # Anhang III
    r'|Annex\s+[IVXLC\d]+'         # Annex XII
    r'|TEIL\s+[IVXLC\d]+'          # TEIL II
    r'|Part\s+[IVXLC\d]+'          # Part III
    r'|Recital\s+\d+'              # Recital 42
    r'|Erwaegungsgrund\s+\d+'      # Erwaegungsgrund 26
    r')',
    re.IGNORECASE | re.MULTILINE
)

# Regex for any heading-like line (Markdown ## or ALL-CAPS line)
_HEADING_RE = re.compile(
    r'^(?:'
    r'#{1,6}\s+.+'                  # Markdown headings
    r'|[A-ZÄÖÜ][A-ZÄÖÜ\s\-]{5,}$'   # ALL-CAPS lines (>5 chars)
    r')',
    re.MULTILINE
)


def _detect_language(text: str) -> str:
    """Simple heuristic: count German vs English marker words."""
    sample = text[:5000].lower()
    de_markers = sum(1 for w in ['der', 'die', 'das', 'und', 'ist', 'für', 'von',
                                 'werden', 'nach', 'gemäß', 'sowie', 'durch']
                     if f' {w} ' in sample)
    en_markers = sum(1 for w in ['the', 'and', 'for', 'that', 'with', 'shall',
                                 'must', 'should', 'which', 'from', 'this']
                     if f' {w} ' in sample)
    return 'de' if de_markers > en_markers else 'en'


def _protect_abbreviations(text: str) -> str:
    """Replace dots in abbreviations with placeholders to prevent false sentence splits."""
    protected = text
    for abbrev in ALL_ABBREVIATIONS:
        pattern = re.compile(r'\b(' + re.escape(abbrev) + r')\.', re.IGNORECASE)
        # Use lambda to preserve original case of the matched abbreviation
        protected = pattern.sub(lambda m: m.group(1).replace('.', '<DOT>') + '<ABBR>', protected)
    # Protect decimals (3.14) and ordinals (1. Absatz)
    protected = re.sub(r'(\d)\.(\d)', r'\1<DECIMAL>\2', protected)
    protected = re.sub(r'(\d+)\.\s', r'\1<ORD> ', protected)
    return protected


def _restore_abbreviations(text: str) -> str:
    """Restore placeholders back to dots."""
    return (text
            .replace('<DOT>', '.')
            .replace('<ABBR>', '.')
            .replace('<DECIMAL>', '.')
            .replace('<ORD>', '.'))


def _split_sentences(text: str) -> List[str]:
    """Split text into sentences, respecting abbreviations in DE and EN."""
    protected = _protect_abbreviations(text)
    # Split after sentence-ending punctuation followed by uppercase or newline
    sentence_pattern = r'(?<=[.!?])\s+(?=[A-ZÄÖÜÀ-Ý])|(?<=[.!?])\s*\n'
    raw = re.split(sentence_pattern, protected)
    sentences = []
    for s in raw:
        s = _restore_abbreviations(s).strip()
        if s:
            sentences.append(s)
    return sentences


def _extract_section_header(line: str) -> Optional[str]:
    """Extract a legal section header from a line, or None."""
    m = _LEGAL_SECTION_RE.match(line.strip())
    if m:
        return line.strip()
    m = _HEADING_RE.match(line.strip())
    if m:
        return line.strip()
    return None


def chunk_text_legal(text: str, chunk_size: int, overlap: int) -> List[str]:
    """
    Legal-document-aware chunking.

    Strategy:
    1. Split on legal section boundaries (§, Art., Section, Chapter, etc.)
    2. Within each section, split on paragraph boundaries (double newline)
    3. Within each paragraph, split on sentence boundaries
    4. Prepend section header as context prefix to every chunk
    5. Add overlap from previous chunk

    Works for both German (DSGVO, BGB, AI Act DE) and English (NIST, SLSA, CRA EN) texts.
    """
    if not text or len(text) <= chunk_size:
        return [text.strip()] if text and text.strip() else []

    # --- Phase 1: Split into sections by legal headers ---
    lines = text.split('\n')
    sections = []  # list of (header, content)
    current_header = None
    current_lines = []

    for line in lines:
        header = _extract_section_header(line)
        if header and current_lines:
            sections.append((current_header, '\n'.join(current_lines)))
            current_header = header
            current_lines = [line]
        elif header and not current_lines:
            current_header = header
            current_lines = [line]
        else:
            current_lines.append(line)

    if current_lines:
        sections.append((current_header, '\n'.join(current_lines)))

    # --- Phase 2: Within each section, split on paragraphs, then sentences ---
    raw_chunks = []

    for section_header, section_text in sections:
        # Build context prefix (max 120 chars to leave room for content)
        prefix = ""
        if section_header:
            truncated = section_header[:120]
            prefix = f"[{truncated}] "

        paragraphs = re.split(r'\n\s*\n', section_text)

        current_chunk = prefix
        current_length = len(prefix)

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            # If paragraph fits in remaining space, append
            if current_length + len(para) + 1 <= chunk_size:
                if current_chunk and not current_chunk.endswith(' '):
                    current_chunk += '\n\n'
                current_chunk += para
                current_length = len(current_chunk)
                continue

            # Paragraph doesn't fit — flush current chunk if non-empty
            if current_chunk.strip() and current_chunk.strip() != prefix.strip():
                raw_chunks.append(current_chunk.strip())

            # If entire paragraph fits in a fresh chunk, start new chunk
            if len(prefix) + len(para) <= chunk_size:
                current_chunk = prefix + para
                current_length = len(current_chunk)
                continue

            # Paragraph too long — split by sentences
            sentences = _split_sentences(para)
            current_chunk = prefix
            current_length = len(prefix)

            for sentence in sentences:
                sentence_len = len(sentence)

                # Single sentence exceeds chunk_size — force-split
                if len(prefix) + sentence_len > chunk_size:
                    if current_chunk.strip() and current_chunk.strip() != prefix.strip():
                        raw_chunks.append(current_chunk.strip())
                    # Hard split the long sentence
                    remaining = sentence
                    while remaining:
                        take = chunk_size - len(prefix)
                        chunk_part = prefix + remaining[:take]
                        raw_chunks.append(chunk_part.strip())
                        remaining = remaining[take:]
                    current_chunk = prefix
                    current_length = len(prefix)
                    continue

                if current_length + sentence_len + 1 > chunk_size:
                    if current_chunk.strip() and current_chunk.strip() != prefix.strip():
                        raw_chunks.append(current_chunk.strip())
                    current_chunk = prefix + sentence
                    current_length = len(current_chunk)
                else:
                    if current_chunk and not current_chunk.endswith(' '):
                        current_chunk += ' '
                    current_chunk += sentence
                    current_length = len(current_chunk)

        # Flush remaining content for this section
        if current_chunk.strip() and current_chunk.strip() != prefix.strip():
            raw_chunks.append(current_chunk.strip())

    if not raw_chunks:
        return [text.strip()] if text.strip() else []

    # --- Phase 3: Add overlap ---
    final_chunks = []
    for i, chunk in enumerate(raw_chunks):
        if i > 0 and overlap > 0:
            prev = raw_chunks[i - 1]
            # Take overlap from end of previous chunk (but not the prefix)
            overlap_text = prev[-min(overlap, len(prev)):]
            # Only add overlap if it doesn't start mid-word
            space_idx = overlap_text.find(' ')
            if space_idx > 0:
                overlap_text = overlap_text[space_idx + 1:]
            if overlap_text:
                chunk = overlap_text + ' ' + chunk
        final_chunks.append(chunk.strip())

    return [c for c in final_chunks if c]


def chunk_text_recursive(text: str, chunk_size: int, overlap: int) -> List[str]:
    """Recursive character-based chunking."""
    import re

    """Recursive character-based chunking (legacy, use legal_recursive for legal docs)."""
    if not text or len(text) <= chunk_size:
        return [text] if text else []

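A minimal usage sketch of the chunk_text_legal pipeline added above; the sample text, sizes, and printed prefix are illustrative assumptions, not taken from this diff:

    from main import chunk_text_legal  # embedding-service/main.py

    header = "Artikel 5 Grundsaetze\n\n"
    body = ("Personenbezogene Daten muessen rechtmaessig verarbeitet werden. "
            "Die Verarbeitung erfolgt gem. Art. 6 Abs. 1 DSGVO. ") * 8
    chunks = chunk_text_legal(header + body, chunk_size=300, overlap=50)
    for c in chunks:
        print(len(c), repr(c[:60]))
    # Each chunk carries the section context prefix "[Artikel 5 Grundsaetze] ",
    # and the abbreviations "gem.", "Art." and "Abs." do not trigger splits.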
@@ -315,36 +552,23 @@ def chunk_text_recursive(text: str, chunk_size: int, overlap: int) -> List[str]:

def chunk_text_semantic(text: str, chunk_size: int, overlap_sentences: int = 1) -> List[str]:
    """Semantic sentence-aware chunking."""
    import re

    if not text:
        return []

    if len(text) <= chunk_size:
        return [text.strip()]

    # Split into sentences (simplified for German)
    text = re.sub(r'\s+', ' ', text).strip()

    # Protect abbreviations
    protected = text
    for abbrev in GERMAN_ABBREVIATIONS:
        pattern = re.compile(r'\b' + re.escape(abbrev) + r'\.', re.IGNORECASE)
        protected = pattern.sub(abbrev.replace('.', '<DOT>') + '<ABBR>', protected)

    # Protect decimals and ordinals
    protected = re.sub(r'(\d)\.(\d)', r'\1<DECIMAL>\2', protected)
    protected = re.sub(r'(\d+)\.(\s)', r'\1<ORD>\2', protected)
    protected = _protect_abbreviations(text)

    # Split on sentence endings
    sentence_pattern = r'(?<=[.!?])\s+(?=[A-ZÄÖÜ])|(?<=[.!?])$'
    sentence_pattern = r'(?<=[.!?])\s+(?=[A-ZÄÖÜÀ-Ý])|(?<=[.!?])$'
    raw_sentences = re.split(sentence_pattern, protected)

    # Restore protected characters
    sentences = []
    for s in raw_sentences:
        s = s.replace('<DOT>', '.').replace('<ABBR>', '.').replace('<DECIMAL>', '.').replace('<ORD>', '.')
        s = s.strip()
        s = _restore_abbreviations(s).strip()
        if s:
            sentences.append(s)

@@ -638,7 +862,16 @@ async def rerank_documents(request: RerankRequest):

@app.post("/chunk", response_model=ChunkResponse)
async def chunk_text(request: ChunkRequest):
    """Chunk text into smaller pieces."""
    """Chunk text into smaller pieces.

    Strategies:
    - "recursive" (default): Legal-document-aware chunking with §/Art./Section
      boundary detection, section context headers, paragraph-level splitting,
      and sentence-level splitting respecting DE + EN abbreviations.
    - "semantic": Sentence-aware chunking with overlap by sentence count.

    The old plain recursive chunker has been retired and is no longer available.
    """
    if not request.text:
        return ChunkResponse(chunks=[], count=0, strategy=request.strategy)

@@ -647,7 +880,9 @@ async def chunk_text(request: ChunkRequest):
        overlap_sentences = max(1, request.overlap // 100)
        chunks = chunk_text_semantic(request.text, request.chunk_size, overlap_sentences)
    else:
        chunks = chunk_text_recursive(request.text, request.chunk_size, request.overlap)
        # All strategies (recursive, legal_recursive, etc.) use the legal-aware chunker.
        # The old plain recursive chunker is no longer exposed via the API.
        chunks = chunk_text_legal(request.text, request.chunk_size, request.overlap)

    return ChunkResponse(
        chunks=chunks,
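A hedged sketch of exercising the updated /chunk endpoint over HTTP with only the standard library; the request and response field names come from the handler above, while the service port is an assumption to adjust per deployment:

    import json
    from urllib.request import Request, urlopen

    payload = {
        "text": "Artikel 12 Transparenz\n\n" + "Anbieter muessen Nutzer klar informieren. " * 20,
        "strategy": "recursive",   # now routed to chunk_text_legal
        "chunk_size": 400,
        "overlap": 50,
    }
    req = Request(
        "http://localhost:8090/chunk",  # port is an assumption
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urlopen(req, timeout=30) as resp:
        result = json.loads(resp.read())
    print(result["count"], result["strategy"])
    print(result["chunks"][0][:80])  # starts with the "[Artikel 12 Transparenz] " context prefix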
embedding-service/test_chunking.py (new file, 288 lines)
@@ -0,0 +1,288 @@
"""
Tests for the legal-aware chunking pipeline.

Covers:
- Legal section header detection (§, Art., Section, Chapter, Annex)
- Section context prefix in every chunk
- Paragraph boundary splitting
- Sentence splitting with DE and EN abbreviation protection
- Overlap between chunks
- Fallback for non-legal text
- Long sentence force-splitting
"""

import pytest
from main import (
    chunk_text_legal,
    chunk_text_recursive,
    chunk_text_semantic,
    _extract_section_header,
    _split_sentences,
    _detect_language,
    _protect_abbreviations,
    _restore_abbreviations,
)


# =========================================================================
# Section header detection
# =========================================================================

class TestSectionHeaderDetection:

    def test_german_paragraph(self):
        assert _extract_section_header("§ 25 Informationspflichten") is not None

    def test_german_paragraph_with_letter(self):
        assert _extract_section_header("§ 5a Elektronischer Geschaeftsverkehr") is not None

    def test_german_artikel(self):
        assert _extract_section_header("Artikel 5 Grundsaetze") is not None

    def test_english_article(self):
        assert _extract_section_header("Article 12 Transparency") is not None

    def test_article_abbreviated(self):
        assert _extract_section_header("Art. 3 Definitions") is not None

    def test_english_section(self):
        assert _extract_section_header("Section 4.2 Risk Assessment") is not None

    def test_german_abschnitt(self):
        assert _extract_section_header("Abschnitt 3 Pflichten") is not None

    def test_chapter(self):
        assert _extract_section_header("Chapter 5 Obligations") is not None

    def test_german_kapitel(self):
        assert _extract_section_header("Kapitel 2 Anwendungsbereich") is not None

    def test_annex_roman(self):
        assert _extract_section_header("Annex XII Technical Documentation") is not None

    def test_german_anhang(self):
        assert _extract_section_header("Anhang III Hochrisiko-KI") is not None

    def test_part(self):
        assert _extract_section_header("Part III Requirements") is not None

    def test_markdown_heading(self):
        assert _extract_section_header("## 3.1 Overview") is not None

    def test_normal_text_not_header(self):
        assert _extract_section_header("This is a normal sentence.") is None

    def test_short_caps_not_header(self):
        assert _extract_section_header("OK") is None


# =========================================================================
# Language detection
# =========================================================================

class TestLanguageDetection:

    def test_german_text(self):
        text = "Die Verordnung ist für alle Mitgliedstaaten verbindlich und gilt nach dem Grundsatz der unmittelbaren Anwendbarkeit."
        assert _detect_language(text) == 'de'

    def test_english_text(self):
        text = "This regulation shall be binding in its entirety and directly applicable in all Member States."
        assert _detect_language(text) == 'en'


# =========================================================================
# Abbreviation protection
# =========================================================================

class TestAbbreviationProtection:

    def test_german_abbreviations(self):
        text = "gem. § 5 Abs. 1 bzw. § 6 Abs. 2 z.B. die Pflicht"
        protected = _protect_abbreviations(text)
        assert "." not in protected.replace("<DOT>", "").replace("<DECIMAL>", "").replace("<ORD>", "").replace("<ABBR>", "")
        restored = _restore_abbreviations(protected)
        assert "gem." in restored
        assert "z.B." in restored.replace("z.b.", "z.B.") or "z.b." in restored

    def test_english_abbreviations(self):
        text = "e.g. section 4.2, i.e. the requirements in vol. 1 ref. NIST SP 800-30."
        protected = _protect_abbreviations(text)
        # "e.g" and "i.e" should be protected
        restored = _restore_abbreviations(protected)
        assert "e.g." in restored

    def test_decimals_protected(self):
        text = "Version 3.14 of the specification requires 2.5 GB."
        protected = _protect_abbreviations(text)
        assert "<DECIMAL>" in protected
        restored = _restore_abbreviations(protected)
        assert "3.14" in restored


# =========================================================================
# Sentence splitting
# =========================================================================

class TestSentenceSplitting:

    def test_simple_german(self):
        text = "Erster Satz. Zweiter Satz. Dritter Satz."
        sentences = _split_sentences(text)
        assert len(sentences) >= 2

    def test_simple_english(self):
        text = "First sentence. Second sentence. Third sentence."
        sentences = _split_sentences(text)
        assert len(sentences) >= 2

    def test_german_abbreviation_not_split(self):
        text = "Gem. Art. 5 Abs. 1 DSGVO ist die Verarbeitung rechtmaessig. Der Verantwortliche muss dies nachweisen."
        sentences = _split_sentences(text)
        # Should NOT split at "Gem." or "Art." or "Abs."
        assert any("Gem" in s and "DSGVO" in s for s in sentences)

    def test_english_abbreviation_not_split(self):
        text = "See e.g. Section 4.2 for details. The standard also references vol. 1 of the NIST SP series."
        sentences = _split_sentences(text)
        assert any("e.g" in s and "Section" in s for s in sentences)

    def test_exclamation_and_question(self):
        text = "Is this valid? Yes it is! Continue processing."
        sentences = _split_sentences(text)
        assert len(sentences) >= 2


# =========================================================================
# Legal chunking
# =========================================================================

class TestChunkTextLegal:

    def test_small_text_single_chunk(self):
        text = "Short text."
        chunks = chunk_text_legal(text, chunk_size=1024, overlap=128)
        assert len(chunks) == 1
        assert chunks[0] == "Short text."

    def test_section_header_as_prefix(self):
        text = "§ 25 Informationspflichten\n\nDer Betreiber muss den Nutzer informieren. " * 20
        chunks = chunk_text_legal(text, chunk_size=200, overlap=0)
        assert len(chunks) > 1
        # Every chunk should have the section prefix
        for chunk in chunks:
            assert "[§ 25" in chunk or "§ 25" in chunk

    def test_article_prefix_english(self):
        text = "Article 12 Transparency\n\n" + "The provider shall ensure transparency of AI systems. " * 30
        chunks = chunk_text_legal(text, chunk_size=300, overlap=0)
        assert len(chunks) > 1
        for chunk in chunks:
            assert "Article 12" in chunk

    def test_multiple_sections(self):
        text = (
            "§ 1 Anwendungsbereich\n\nDieses Gesetz gilt fuer alle Betreiber.\n\n"
            "§ 2 Begriffsbestimmungen\n\nIm Sinne dieses Gesetzes ist Betreiber, wer eine Anlage betreibt.\n\n"
            "§ 3 Pflichten\n\nDer Betreiber hat die Pflicht, die Anlage sicher zu betreiben."
        )
        chunks = chunk_text_legal(text, chunk_size=200, overlap=0)
        # Should have chunks from different sections
        section_headers = set()
        for chunk in chunks:
            if "[§ 1" in chunk:
                section_headers.add("§ 1")
            if "[§ 2" in chunk:
                section_headers.add("§ 2")
            if "[§ 3" in chunk:
                section_headers.add("§ 3")
        assert len(section_headers) >= 2

    def test_paragraph_boundaries_respected(self):
        para1 = "First paragraph with enough text to matter. " * 5
        para2 = "Second paragraph also with content. " * 5
        text = para1.strip() + "\n\n" + para2.strip()
        chunks = chunk_text_legal(text, chunk_size=300, overlap=0)
        # Paragraphs should not be merged mid-sentence across chunk boundary
        assert len(chunks) >= 2

    def test_overlap_present(self):
        text = "Sentence one about topic A. " * 10 + "\n\n" + "Sentence two about topic B. " * 10
        chunks = chunk_text_legal(text, chunk_size=200, overlap=50)
        if len(chunks) > 1:
            # Second chunk should contain some text from end of first chunk
            end_of_first = chunks[0][-30:]
            # At least some overlap words should appear
            overlap_words = set(end_of_first.split())
            second_start_words = set(chunks[1][:80].split())
            assert len(overlap_words & second_start_words) > 0

    def test_nist_style_sections(self):
        text = (
            "Section 2.1 Risk Framing\n\n"
            "Risk framing establishes the context for risk-based decisions. "
            "Organizations must define their risk tolerance. " * 10 + "\n\n"
            "Section 2.2 Risk Assessment\n\n"
            "Risk assessment identifies threats and vulnerabilities. " * 10
        )
        chunks = chunk_text_legal(text, chunk_size=400, overlap=0)
        has_21 = any("Section 2.1" in c for c in chunks)
        has_22 = any("Section 2.2" in c for c in chunks)
        assert has_21 and has_22

    def test_markdown_heading_as_context(self):
        text = (
            "## 3.1 Overview\n\n"
            "This section provides an overview of the specification. " * 15
        )
        chunks = chunk_text_legal(text, chunk_size=300, overlap=0)
        assert len(chunks) > 1
        for chunk in chunks:
            assert "3.1 Overview" in chunk

    def test_empty_text(self):
        assert chunk_text_legal("", 1024, 128) == []

    def test_whitespace_only(self):
        assert chunk_text_legal("   \n\n  ", 1024, 128) == []

    def test_long_sentence_force_split(self):
        long_sentence = "A" * 2000
        chunks = chunk_text_legal(long_sentence, chunk_size=500, overlap=0)
        assert len(chunks) >= 4
        for chunk in chunks:
            assert len(chunk) <= 500 + 20  # small margin for prefix


# =========================================================================
# Legacy recursive chunking still works
# =========================================================================

class TestChunkTextRecursive:

    def test_basic_split(self):
        text = "Hello world. " * 200
        chunks = chunk_text_recursive(text, chunk_size=500, overlap=50)
        assert len(chunks) > 1
        for chunk in chunks:
            assert len(chunk) <= 600  # some margin for overlap

    def test_small_text(self):
        chunks = chunk_text_recursive("Short.", chunk_size=1024, overlap=128)
        assert chunks == ["Short."]


# =========================================================================
# Semantic chunking still works
# =========================================================================

class TestChunkTextSemantic:

    def test_basic_split(self):
        text = "First sentence. Second sentence. Third sentence. Fourth sentence. Fifth sentence."
        chunks = chunk_text_semantic(text, chunk_size=50, overlap_sentences=1)
        assert len(chunks) >= 2

    def test_small_text(self):
        chunks = chunk_text_semantic("Short.", chunk_size=1024, overlap_sentences=1)
        assert chunks == ["Short."]
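One note on running this suite: it imports the service module directly (from main import ...), so it has to be executed with embedding-service/ as the working directory, e.g. cd embedding-service && pytest test_chunking.py, assuming pytest is installed in that environment.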
@@ -578,6 +578,33 @@ server {
    }
}

# =========================================================
# CORE: Control Pipeline on port 8098 (Entwickler-only)
# =========================================================
server {
    listen 8098 ssl;
    http2 on;
    server_name macmini localhost;

    ssl_certificate /etc/nginx/certs/macmini.crt;
    ssl_certificate_key /etc/nginx/certs/macmini.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
    ssl_prefer_server_ciphers off;

    location / {
        set $upstream_pipeline bp-core-control-pipeline:8098;
        proxy_pass http://$upstream_pipeline;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_read_timeout 1800s;
        proxy_send_timeout 1800s;
    }
}

# =========================================================
# CORE: Edu-Search on port 8089
# =========================================================
@@ -733,3 +760,33 @@ server {
        try_files $uri $uri/ /index.html;
    }
}

# =========================================================
# PITCH DECK: Investor Presentation on port 3012
# =========================================================
server {
    listen 3012 ssl;
    http2 on;
    server_name macmini localhost;

    ssl_certificate /etc/nginx/certs/macmini.crt;
    ssl_certificate_key /etc/nginx/certs/macmini.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
    ssl_prefer_server_ciphers off;

    location / {
        set $upstream_pitch bp-core-pitch-deck:3000;
        proxy_pass http://$upstream_pitch;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto https;
        proxy_read_timeout 300s;
        proxy_connect_timeout 60s;
        proxy_send_timeout 300s;
    }
}

paddleocr-service/Dockerfile (new file, 16 lines)
@@ -0,0 +1,16 @@
FROM python:3.11-slim
WORKDIR /app

RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1 libglib2.0-0 libgomp1 curl \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 8095
HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
    CMD curl -f http://127.0.0.1:8095/health || exit 1
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8095"]
paddleocr-service/main.py (new file, 110 lines)
@@ -0,0 +1,110 @@
"""PaddleOCR Remote Service — PP-OCRv4 on x86_64 (CPU)."""

import io
import logging
import os
import threading

import numpy as np
from fastapi import FastAPI, File, Header, HTTPException, UploadFile
from PIL import Image

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="PaddleOCR Service")

_engine = None
_ready = False
_loading = False
API_KEY = os.environ.get("PADDLEOCR_API_KEY", "")


def _load_model():
    """Load PaddleOCR model in background thread."""
    global _engine, _ready
    try:
        logger.info("Importing paddleocr...")
        from paddleocr import PaddleOCR

        logger.info("Loading PaddleOCR model (PP-OCRv4, lang=en)...")
        _engine = PaddleOCR(
            lang="en",
            use_angle_cls=True,
            show_log=False,
            enable_mkldnn=False,
            use_gpu=False,
        )
        logger.info("PaddleOCR model loaded — running warmup...")
        # Warmup with tiny image to trigger any lazy init
        dummy = np.ones((30, 100, 3), dtype=np.uint8) * 255
        _engine.ocr(dummy)
        _ready = True
        logger.info("PaddleOCR ready to serve")
    except Exception as e:
        logger.error(f"Failed to load PaddleOCR: {e}", exc_info=True)


@app.on_event("startup")
def startup_load_model():
    """Start model loading in background so health check passes immediately."""
    global _loading
    _loading = True
    threading.Thread(target=_load_model, daemon=True).start()
    logger.info("Model loading started in background thread")


@app.get("/health")
def health():
    if _ready:
        return {"status": "ok", "model": "PP-OCRv4"}
    if _loading:
        return {"status": "loading"}
    return {"status": "error"}


@app.post("/ocr")
async def ocr(
    file: UploadFile = File(...),
    x_api_key: str = Header(default=""),
):
    if API_KEY and x_api_key != API_KEY:
        raise HTTPException(status_code=401, detail="Invalid API key")

    if not _ready:
        raise HTTPException(status_code=503, detail="Model still loading")

    img_bytes = await file.read()
    img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
    img_np = np.array(img)

    try:
        result = _engine.ocr(img_np)
    except Exception as e:
        logger.error(f"OCR failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"OCR failed: {e}")

    if not result or not result[0]:
        return {"words": [], "image_width": img_np.shape[1], "image_height": img_np.shape[0]}

    words = []
    for line in result[0]:
        box, (text, conf) = line[0], line[1]
        x_min = min(p[0] for p in box)
        y_min = min(p[1] for p in box)
        x_max = max(p[0] for p in box)
        y_max = max(p[1] for p in box)
        words.append({
            "text": str(text).strip(),
            "left": int(x_min),
            "top": int(y_min),
            "width": int(x_max - x_min),
            "height": int(y_max - y_min),
            "conf": round(float(conf) * 100, 1),
        })

    return {
        "words": words,
        "image_width": img_np.shape[1],
        "image_height": img_np.shape[0],
    }
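A hedged client sketch for the /ocr endpoint above; port 8095 comes from the Dockerfile's EXPOSE, while the requests dependency, the image path, and the key value are illustrative assumptions:

    import requests

    with open("scan.png", "rb") as f:  # any raster image
        resp = requests.post(
            "http://localhost:8095/ocr",
            headers={"X-API-Key": "change-me"},  # only enforced when PADDLEOCR_API_KEY is set
            files={"file": ("scan.png", f, "image/png")},
            timeout=120,  # generous: the model may still be warming up early on
        )
    resp.raise_for_status()
    for w in resp.json()["words"]:
        print(f'{w["conf"]:5.1f}%  ({w["left"]},{w["top"]})  {w["text"]}')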
paddleocr-service/requirements.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
paddlepaddle>=2.6.0,<3.0.0
paddleocr>=2.7.0,<3.0.0
fastapi>=0.110.0
uvicorn>=0.25.0
python-multipart>=0.0.6
Pillow>=10.0.0
numpy>=1.24.0
@@ -12,6 +12,10 @@ RUN npm install
# Copy source code
COPY . .

# Embed git commit hash into build
ARG GIT_SHA=dev
ENV GIT_SHA=$GIT_SHA

# Build the application
RUN npm run build

@@ -28,7 +32,7 @@ RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs

# Copy built assets
COPY --from=builder /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static

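For the embedded hash to reflect the real commit, the build has to pass it through, e.g. docker build --build-arg GIT_SHA=$(git rev-parse --short HEAD) .; otherwise GIT_SHA keeps its "dev" default. The concrete invocation lives in whatever script or CI job drives this build and is not shown in this diff.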
pitch-deck/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@

Tue Apr 14 09:22:10 AM CEST 2026

Tue Apr 14 09:27:05 AM CEST 2026
Tue Apr 14 09:32:36 AM CEST 2026
Tue Apr 15 rebuild trigger
Tue Apr 15 rebuild 2
pitch-deck/__tests__/api/reissue-regression.test.ts (new file, 294 lines)
@@ -0,0 +1,294 @@
/**
 * Regression test for the "lost access" scenario:
 *
 * 1. Admin invites investor A → token T1 is created and emailed.
 * 2. Investor A opens the link successfully → T1 is marked used_at.
 * 3. Investor A clears their session (or a redeploy drops cookies).
 * 4. Investor A returns to / — redirected to /auth.
 * 5. Without this feature, A is stuck: T1 is already used, expired, or the
 *    session is gone, and there is no self-service way to get back in.
 * 6. With this feature, A enters their email on /auth and the endpoint
 *    issues a brand new, unused magic link T2 for the same investor row.
 *
 * This test wires together the request-link handler with the real verify
 * handler against an in-memory fake of the two tables the flow touches
 * (pitch_investors, pitch_magic_links) so we can assert end-to-end that a
 * second link works after the first one was used.
 */

import { describe, it, expect, beforeEach, vi } from 'vitest'
import { NextRequest } from 'next/server'

// ---- In-memory fake of the two tables touched by this flow ----

interface InvestorRow {
  id: string
  email: string
  name: string | null
  company: string | null
  status: 'invited' | 'active' | 'revoked'
  last_login_at: Date | null
  login_count: number
}
interface MagicLinkRow {
  id: string
  investor_id: string
  token: string
  expires_at: Date
  used_at: Date | null
  ip_address: string | null
  user_agent: string | null
}

const db = {
  investors: [] as InvestorRow[],
  magicLinks: [] as MagicLinkRow[],
  sessions: [] as { id: string; investor_id: string; ip_address: string | null }[],
}

let idCounter = 0
const nextId = () => `row-${++idCounter}`

// A tiny query router: match the SQL fragment we care about, ignore the rest.
const queryMock = vi.fn(async (sql: string, params: unknown[] = []) => {
  const s = sql.replace(/\s+/g, ' ').trim()

  // Investor lookup by email (used by request-link)
  if (/SELECT id, email, name, status FROM pitch_investors WHERE email = \$1/i.test(s)) {
    const row = db.investors.find(i => i.email === params[0])
    return { rows: row ? [row] : [] }
  }

  // Insert magic link
  if (/INSERT INTO pitch_magic_links \(investor_id, token, expires_at\)/i.test(s)) {
    db.magicLinks.push({
      id: nextId(),
      investor_id: params[0] as string,
      token: params[1] as string,
      expires_at: params[2] as Date,
      used_at: null,
      ip_address: null,
      user_agent: null,
    })
    return { rows: [] }
  }

  // Verify: magic link + investor JOIN lookup
  if (/FROM pitch_magic_links ml JOIN pitch_investors i/i.test(s)) {
    const link = db.magicLinks.find(ml => ml.token === params[0])
    if (!link) return { rows: [] }
    const inv = db.investors.find(i => i.id === link.investor_id)!
    return {
      rows: [{
        id: link.id,
        investor_id: link.investor_id,
        expires_at: link.expires_at,
        used_at: link.used_at,
        email: inv.email,
        investor_status: inv.status,
      }],
    }
  }

  // Mark magic link used
  if (/UPDATE pitch_magic_links SET used_at = NOW/i.test(s)) {
    const link = db.magicLinks.find(ml => ml.id === params[2])
    if (link) {
      link.used_at = new Date()
      link.ip_address = params[0] as string | null
      link.user_agent = params[1] as string | null
    }
    return { rows: [] }
  }

  // Activate investor
  if (/UPDATE pitch_investors SET status = 'active'/i.test(s)) {
    const inv = db.investors.find(i => i.id === params[0])
    if (inv) {
      inv.status = 'active'
      inv.last_login_at = new Date()
      inv.login_count += 1
    }
    return { rows: [] }
  }

  // createSession: revoke prior sessions (no-op in fake)
  if (/UPDATE pitch_sessions SET revoked = true WHERE investor_id/i.test(s)) {
    return { rows: [] }
  }

  // createSession: insert
  if (/INSERT INTO pitch_sessions/i.test(s)) {
    const id = nextId()
    db.sessions.push({ id, investor_id: params[0] as string, ip_address: params[2] as string | null })
    return { rows: [{ id }] }
  }

  // createSession: fetch investor email for JWT
  if (/SELECT email FROM pitch_investors WHERE id = \$1/i.test(s)) {
    const inv = db.investors.find(i => i.id === params[0])
    return { rows: inv ? [{ email: inv.email }] : [] }
  }

  // new-ip detection query (verify route)
  if (/SELECT DISTINCT ip_address FROM pitch_sessions/i.test(s)) {
    return { rows: [] }
  }

  // Audit log insert — accept everything
  if (/INSERT INTO pitch_audit_logs/i.test(s)) {
    return { rows: [] }
  }

  throw new Error(`Unmocked query: ${s.slice(0, 120)}…`)
})

vi.mock('@/lib/db', () => ({
  default: { query: (...args: unknown[]) => queryMock(args[0] as string, args[1] as unknown[]) },
}))

// Capture emails instead of sending them
const sentEmails: Array<{ to: string; url: string }> = []
vi.mock('@/lib/email', () => ({
  sendMagicLinkEmail: vi.fn(async (to: string, _name: string | null, url: string) => {
    sentEmails.push({ to, url })
  }),
}))

// next/headers cookies() needs to be stubbed — setSessionCookie calls it.
vi.mock('next/headers', () => ({
  cookies: async () => ({
    set: vi.fn(),
    get: vi.fn(),
    delete: vi.fn(),
  }),
}))

// Import the handlers AFTER mocks are set up
import { POST as requestLink } from '@/app/api/auth/request-link/route'
import { POST as verifyLink } from '@/app/api/auth/verify/route'

function makeJsonRequest(url: string, body: unknown, ip = '203.0.113.1'): NextRequest {
  return new NextRequest(url, {
    method: 'POST',
    headers: { 'content-type': 'application/json', 'x-forwarded-for': ip },
    body: JSON.stringify(body),
  })
}

function extractToken(url: string): string {
  const m = url.match(/token=([0-9a-f]+)/)
  if (!m) throw new Error(`No token in url: ${url}`)
  return m[1]
}

beforeEach(() => {
  db.investors = []
  db.magicLinks = []
  db.sessions = []
  sentEmails.length = 0
  idCounter = 0
  queryMock.mockClear()
})

describe('Regression: investor can re-request a working magic link after the first is consumed', () => {
  it('full flow — invite → use → request-link → new link works', async () => {
    // --- Setup: admin has already invited the investor (simulate the outcome) ---
    const investorId = 'investor-42'
    db.investors.push({
      id: investorId,
      email: 'vc@example.com',
      name: 'VC Partner',
      company: 'Acme Capital',
      status: 'invited',
      last_login_at: null,
      login_count: 0,
    })
    db.magicLinks.push({
      id: 'ml-original',
      investor_id: investorId,
      token: 'a'.repeat(96), // original invite token
      expires_at: new Date(Date.now() + 72 * 60 * 60 * 1000),
      used_at: null,
      ip_address: null,
      user_agent: null,
    })

    // --- Step 1: investor uses the original invite link ---
    const firstVerify = await verifyLink(makeJsonRequest('http://localhost/api/auth/verify', { token: 'a'.repeat(96) }))
    expect(firstVerify.status).toBe(200)
    const first = db.magicLinks.find(ml => ml.id === 'ml-original')!
    expect(first.used_at).not.toBeNull()

    // --- Step 2: investor comes back later; clicks the same link → rejected ---
    const replay = await verifyLink(makeJsonRequest('http://localhost/api/auth/verify', { token: 'a'.repeat(96) }))
    expect(replay.status).toBe(401)
    const replayBody = await replay.json()
    expect(replayBody.error).toMatch(/already been used/i)

    // --- Step 3: investor visits /auth and submits their email ---
    const reissue = await requestLink(
      makeJsonRequest('http://localhost/api/auth/request-link', { email: 'vc@example.com' }, '203.0.113.99'),
    )
    expect(reissue.status).toBe(200)
    const reissueBody = await reissue.json()
    expect(reissueBody.success).toBe(true)

    // --- Step 4: a fresh email was dispatched to the investor ---
    expect(sentEmails).toHaveLength(1)
    expect(sentEmails[0].to).toBe('vc@example.com')
    const newToken = extractToken(sentEmails[0].url)
    expect(newToken).not.toBe('a'.repeat(96))
    expect(newToken).toMatch(/^[0-9a-f]{96}$/)

    // A second unused magic link row exists for the same investor
    const links = db.magicLinks.filter(ml => ml.investor_id === investorId)
    expect(links).toHaveLength(2)
    const newLink = links.find(ml => ml.token === newToken)!
    expect(newLink.used_at).toBeNull()

    // --- Step 5: the new token validates successfully ---
    const secondVerify = await verifyLink(makeJsonRequest('http://localhost/api/auth/verify', { token: newToken }))
    expect(secondVerify.status).toBe(200)
    const secondBody = await secondVerify.json()
    expect(secondBody.success).toBe(true)
    expect(secondBody.redirect).toBe('/')

    // And the new link is now used, mirroring the one-time-use contract
    expect(newLink.used_at).not.toBeNull()
  })

  it('unknown emails do not create magic links or send email (prevents enumeration & abuse)', async () => {
    // No investors in the DB
    const res = await requestLink(
      makeJsonRequest('http://localhost/api/auth/request-link', { email: 'stranger@example.com' }),
    )
    expect(res.status).toBe(200)
    const body = await res.json()
    // Same generic message as the happy path
    expect(body.success).toBe(true)
    expect(body.message).toMatch(/if this email was invited/i)

    expect(sentEmails).toHaveLength(0)
    expect(db.magicLinks).toHaveLength(0)
  })

  it('revoked investors cannot self-serve a new link', async () => {
    db.investors.push({
      id: 'revoked-1',
      email: 'gone@example.com',
      name: null,
      company: null,
      status: 'revoked',
      last_login_at: null,
      login_count: 0,
    })

    const res = await requestLink(
      makeJsonRequest('http://localhost/api/auth/request-link', { email: 'gone@example.com' }),
    )
    expect(res.status).toBe(200) // generic success (no info leak)
    expect(sentEmails).toHaveLength(0)
    expect(db.magicLinks).toHaveLength(0)
  })
})
213
pitch-deck/__tests__/api/request-link.test.ts
Normal file
213
pitch-deck/__tests__/api/request-link.test.ts
Normal file
@@ -0,0 +1,213 @@
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { NextRequest } from 'next/server'

// Mock the DB pool before the route is imported
const queryMock = vi.fn()
vi.mock('@/lib/db', () => ({
  default: { query: (...args: unknown[]) => queryMock(...args) },
}))

// Mock the email sender so no SMTP is attempted
const sendMagicLinkEmailMock = vi.fn().mockResolvedValue(undefined)
vi.mock('@/lib/email', () => ({
  sendMagicLinkEmail: (...args: unknown[]) => sendMagicLinkEmailMock(...args),
}))

// Import after mocks are registered
import { POST } from '@/app/api/auth/request-link/route'

// Unique suffix per test so the rate-limit store (keyed by IP / email) doesn't
// bleed across cases — the rate-limiter holds state at module scope.
let testId = 0
function uniqueIp() {
  testId++
  return `10.0.${Math.floor(testId / 250)}.${testId % 250}`
}

function makeRequest(body: unknown, ip = uniqueIp()): NextRequest {
  return new NextRequest('http://localhost/api/auth/request-link', {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      'x-forwarded-for': ip,
    },
    body: JSON.stringify(body),
  })
}

function investorRow(overrides: Partial<{ id: string; email: string; name: string | null; status: string }> = {}) {
  return {
    id: overrides.id ?? 'investor-1',
    email: overrides.email ?? 'invited@example.com',
    name: overrides.name ?? 'Alice',
    status: overrides.status ?? 'invited',
  }
}

beforeEach(() => {
  queryMock.mockReset()
  sendMagicLinkEmailMock.mockReset()
  sendMagicLinkEmailMock.mockResolvedValue(undefined)
})

describe('POST /api/auth/request-link — input validation', () => {
  it('returns 400 when email is missing', async () => {
    const res = await POST(makeRequest({}))
    expect(res.status).toBe(400)
    const body = await res.json()
    expect(body.error).toBe('Email required')
    expect(queryMock).not.toHaveBeenCalled()
    expect(sendMagicLinkEmailMock).not.toHaveBeenCalled()
  })

  it('returns 400 when email is not a string', async () => {
    const res = await POST(makeRequest({ email: 12345 }))
    expect(res.status).toBe(400)
    expect(sendMagicLinkEmailMock).not.toHaveBeenCalled()
  })

  it('handles malformed JSON body as missing email (400)', async () => {
    const req = new NextRequest('http://localhost/api/auth/request-link', {
      method: 'POST',
      headers: { 'content-type': 'application/json', 'x-forwarded-for': uniqueIp() },
      body: 'not-json',
    })
    const res = await POST(req)
    expect(res.status).toBe(400)
  })
})

describe('POST /api/auth/request-link — unknown email (enumeration resistance)', () => {
  it('returns the generic success response without sending email', async () => {
    // First query: investor lookup → empty rows
    queryMock.mockResolvedValueOnce({ rows: [] })
    // Second query: the audit log insert
    queryMock.mockResolvedValueOnce({ rows: [] })

    const res = await POST(makeRequest({ email: 'unknown@example.com' }))
    expect(res.status).toBe(200)
    const body = await res.json()
    expect(body.success).toBe(true)
    expect(body.message).toMatch(/if this email was invited/i)
    expect(sendMagicLinkEmailMock).not.toHaveBeenCalled()

    // Verify the investor-lookup SQL was issued with the normalized email
    const [sql, params] = queryMock.mock.calls[0]
    expect(sql).toMatch(/FROM pitch_investors WHERE email/i)
    expect(params).toEqual(['unknown@example.com'])
  })

  it('normalizes email (trim + lowercase) before lookup', async () => {
    queryMock.mockResolvedValueOnce({ rows: [] })
    queryMock.mockResolvedValueOnce({ rows: [] })

    await POST(makeRequest({ email: ' Mixed@Example.COM ' }))

    const [, params] = queryMock.mock.calls[0]
    expect(params).toEqual(['mixed@example.com'])
  })
})

describe('POST /api/auth/request-link — known investor', () => {
  it('creates a new magic link and sends the email with generic response', async () => {
    // 1st: investor lookup → found
    queryMock.mockResolvedValueOnce({ rows: [investorRow()] })
    // 2nd: magic link insert
    queryMock.mockResolvedValueOnce({ rows: [] })
    // 3rd: audit log insert
    queryMock.mockResolvedValueOnce({ rows: [] })

    const res = await POST(makeRequest({ email: 'invited@example.com' }))
    expect(res.status).toBe(200)
    const body = await res.json()
    expect(body.success).toBe(true)
    // Response is identical to the unknown-email case (no information leak)
    expect(body.message).toMatch(/if this email was invited/i)

    // Verify magic link insert
    const [insertSql, insertParams] = queryMock.mock.calls[1]
    expect(insertSql).toMatch(/INSERT INTO pitch_magic_links/i)
    expect(insertParams[0]).toBe('investor-1')
    expect(insertParams[1]).toMatch(/^[0-9a-f]{96}$/) // 96-char hex token
    expect(insertParams[2]).toBeInstanceOf(Date)

    // Verify email was sent with the fresh token URL
    expect(sendMagicLinkEmailMock).toHaveBeenCalledTimes(1)
    const [emailTo, emailName, magicLinkUrl] = sendMagicLinkEmailMock.mock.calls[0]
    expect(emailTo).toBe('invited@example.com')
    expect(emailName).toBe('Alice')
    expect(magicLinkUrl).toMatch(/\/auth\/verify\?token=[0-9a-f]{96}$/)
  })

  it('generates a different token on each call (re-invite is always fresh)', async () => {
    // Call 1
    queryMock.mockResolvedValueOnce({ rows: [investorRow({ email: 'a@x.com' })] })
    queryMock.mockResolvedValueOnce({ rows: [] })
    queryMock.mockResolvedValueOnce({ rows: [] })
    await POST(makeRequest({ email: 'a@x.com' }))

    // Call 2 — different email to avoid the per-email rate limit
    queryMock.mockResolvedValueOnce({ rows: [investorRow({ email: 'b@x.com' })] })
    queryMock.mockResolvedValueOnce({ rows: [] })
    queryMock.mockResolvedValueOnce({ rows: [] })
    await POST(makeRequest({ email: 'b@x.com' }))

    const token1 = queryMock.mock.calls[1][1][1]
    const token2 = queryMock.mock.calls[4][1][1]
    expect(token1).not.toBe(token2)
  })

  it('skips email send for a revoked investor (returns generic response)', async () => {
    queryMock.mockResolvedValueOnce({ rows: [investorRow({ status: 'revoked' })] })
    queryMock.mockResolvedValueOnce({ rows: [] }) // audit log

    const res = await POST(makeRequest({ email: 'invited@example.com' }))
    expect(res.status).toBe(200)
    const body = await res.json()
    expect(body.success).toBe(true)
    expect(sendMagicLinkEmailMock).not.toHaveBeenCalled()

    // Ensure no magic link was inserted
    const inserts = queryMock.mock.calls.filter(c => /INSERT INTO pitch_magic_links/i.test(c[0]))
    expect(inserts.length).toBe(0)
  })
})

describe('POST /api/auth/request-link — rate limiting', () => {
  it('throttles after N requests per email and returns generic success (silent throttle)', async () => {
    const email = `throttle-${Date.now()}@example.com`

    // First 3 requests succeed (RATE_LIMITS.magicLink.limit = 3)
    for (let i = 0; i < 3; i++) {
      queryMock.mockResolvedValueOnce({ rows: [investorRow({ email })] })
      queryMock.mockResolvedValueOnce({ rows: [] }) // magic link insert
      queryMock.mockResolvedValueOnce({ rows: [] }) // audit log
      const res = await POST(makeRequest({ email }))
      expect(res.status).toBe(200)
    }
    expect(sendMagicLinkEmailMock).toHaveBeenCalledTimes(3)

    // 4th request is silently throttled — same generic response, no email sent
    queryMock.mockResolvedValueOnce({ rows: [] }) // audit log only
    const res4 = await POST(makeRequest({ email }))
    expect(res4.status).toBe(200)
    const body4 = await res4.json()
    expect(body4.success).toBe(true)
    // Still exactly 3 emails sent — nothing new
    expect(sendMagicLinkEmailMock).toHaveBeenCalledTimes(3)
  })

  it('throttles with 429 after too many attempts from the same IP', async () => {
    const ip = '172.31.99.99'
    // RATE_LIMITS.authVerify.limit = 10 for IP-scoped checks
    for (let i = 0; i < 10; i++) {
      queryMock.mockResolvedValueOnce({ rows: [] }) // investor lookup returns empty
      queryMock.mockResolvedValueOnce({ rows: [] }) // audit
      const res = await POST(makeRequest({ email: `ip-test-${i}@example.com` }, ip))
      expect(res.status).toBe(200)
    }

    const res = await POST(makeRequest({ email: 'final@example.com' }, ip))
    expect(res.status).toBe(429)
  })
})
pitch-deck/__tests__/lib/admin-auth.test.ts (Normal file, 96 lines)
@@ -0,0 +1,96 @@
import { describe, it, expect } from 'vitest'
import {
  hashPassword,
  verifyPassword,
  createAdminJwt,
  verifyAdminJwt,
} from '@/lib/admin-auth'
import { createJwt, verifyJwt } from '@/lib/auth'

describe('admin-auth: password hashing', () => {
  it('hashPassword produces a bcrypt hash', async () => {
    const hash = await hashPassword('correct-horse-battery-staple')
    expect(hash).toMatch(/^\$2[aby]\$/)
    expect(hash.length).toBeGreaterThanOrEqual(50)
  })

  it('hashPassword is non-deterministic (different salt each call)', async () => {
    const a = await hashPassword('same-password')
    const b = await hashPassword('same-password')
    expect(a).not.toBe(b)
  })

  it('verifyPassword accepts the original password', async () => {
    const hash = await hashPassword('correct-horse-battery-staple')
    expect(await verifyPassword('correct-horse-battery-staple', hash)).toBe(true)
  })

  it('verifyPassword rejects a wrong password', async () => {
    const hash = await hashPassword('correct-horse-battery-staple')
    expect(await verifyPassword('wrong-password', hash)).toBe(false)
  })

  it('verifyPassword rejects empty input against any hash', async () => {
    const hash = await hashPassword('something')
    expect(await verifyPassword('', hash)).toBe(false)
  })

  it('verifyPassword is case-sensitive', async () => {
    const hash = await hashPassword('CaseSensitive')
    expect(await verifyPassword('casesensitive', hash)).toBe(false)
    expect(await verifyPassword('CaseSensitive', hash)).toBe(true)
  })
})

describe('admin-auth: JWT roundtrip', () => {
  const payload = {
    sub: 'admin-uuid-123',
    email: 'admin@example.com',
    sessionId: 'session-uuid-456',
  }

  it('createAdminJwt + verifyAdminJwt roundtrip preserves payload', async () => {
    const jwt = await createAdminJwt(payload)
    const decoded = await verifyAdminJwt(jwt)
    expect(decoded).not.toBeNull()
    expect(decoded?.sub).toBe(payload.sub)
    expect(decoded?.email).toBe(payload.email)
    expect(decoded?.sessionId).toBe(payload.sessionId)
  })

  it('verifyAdminJwt rejects a tampered token', async () => {
    const jwt = await createAdminJwt(payload)
    const tampered = jwt.slice(0, -2) + 'XX'
    expect(await verifyAdminJwt(tampered)).toBeNull()
  })

  it('verifyAdminJwt rejects garbage input', async () => {
    expect(await verifyAdminJwt('not-a-jwt')).toBeNull()
    expect(await verifyAdminJwt('')).toBeNull()
    expect(await verifyAdminJwt('a.b.c')).toBeNull()
  })
})

describe('admin-auth: audience claim isolation', () => {
  // This is the security boundary: an investor JWT must NEVER validate as an admin JWT
  // (and vice versa). They share the same secret but use audience claims to stay distinct.

  const payload = { sub: 'user-id', email: 'user@example.com', sessionId: 'session' }

  it('an investor JWT (no admin audience) is rejected by verifyAdminJwt', async () => {
    const investorJwt = await createJwt(payload)
    const result = await verifyAdminJwt(investorJwt)
    expect(result).toBeNull()
  })

  it('an admin JWT and an investor JWT are distinct tokens for the same payload', async () => {
    // Note: verifyJwt does not enforce an audience claim, so an admin JWT carrying one
    // technically *could* parse through it. In practice this cannot happen: the admin
    // token lives under a different cookie name (pitch_admin_session). We pin the
    // expectation here by asserting the two tokens are never interchangeable strings.
    const adminJwt = await createAdminJwt(payload)
    expect(adminJwt).not.toBe(await createJwt(payload))
  })
})
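// A minimal sketch of how the audience separation above could be implemented with
// `jose` (an assumption: the real @/lib/admin-auth may differ in claim names and
// expiry). The admin signer sets aud: 'admin'; the admin verifier requires it,
// which is why an investor JWT (no audience) fails verifyAdminJwt.
import { SignJWT, jwtVerify, type JWTPayload } from 'jose'

const secret = new TextEncoder().encode(process.env.PITCH_JWT_SECRET!)

async function createAdminJwtSketch(payload: JWTPayload): Promise<string> {
  return new SignJWT(payload)
    .setProtectedHeader({ alg: 'HS256' })
    .setAudience('admin') // the claim that scopes this token to admin use
    .setIssuedAt()
    .setExpirationTime('8h')
    .sign(secret)
}

async function verifyAdminJwtSketch(token: string): Promise<JWTPayload | null> {
  try {
    // audience: 'admin' makes jwtVerify throw for tokens without that claim
    const { payload } = await jwtVerify(token, secret, { audience: 'admin' })
    return payload
  } catch {
    return null // tampered, expired, garbage, or wrong audience
  }
}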
pitch-deck/__tests__/lib/auth.test.ts (Normal file, 118 lines)
@@ -0,0 +1,118 @@
import { describe, it, expect } from 'vitest'
import {
  hashToken,
  generateToken,
  validateAdminSecret,
  getClientIp,
  createJwt,
  verifyJwt,
} from '@/lib/auth'

describe('auth: token utilities', () => {
  it('generateToken produces a 96-character hex string (48 random bytes)', () => {
    const t = generateToken()
    expect(t).toMatch(/^[0-9a-f]{96}$/)
  })

  it('generateToken produces unique values across calls', () => {
    const seen = new Set()
    for (let i = 0; i < 100; i++) seen.add(generateToken())
    expect(seen.size).toBe(100)
  })

  it('hashToken is deterministic for the same input', () => {
    const a = hashToken('input')
    const b = hashToken('input')
    expect(a).toBe(b)
  })

  it('hashToken produces a 64-char hex SHA-256 digest', () => {
    expect(hashToken('anything')).toMatch(/^[0-9a-f]{64}$/)
  })

  it('hashToken produces different output for different input', () => {
    expect(hashToken('a')).not.toBe(hashToken('b'))
  })
})

describe('auth: validateAdminSecret (CLI bearer fallback)', () => {
  it('accepts the correct bearer header', () => {
    const req = new Request('http://x', {
      headers: { authorization: `Bearer ${process.env.PITCH_ADMIN_SECRET}` },
    })
    expect(validateAdminSecret(req)).toBe(true)
  })

  it('rejects a wrong bearer secret', () => {
    const req = new Request('http://x', {
      headers: { authorization: 'Bearer wrong-secret' },
    })
    expect(validateAdminSecret(req)).toBe(false)
  })

  it('rejects requests with no Authorization header', () => {
    const req = new Request('http://x')
    expect(validateAdminSecret(req)).toBe(false)
  })

  it('rejects bare secret without Bearer prefix', () => {
    const req = new Request('http://x', {
      headers: { authorization: process.env.PITCH_ADMIN_SECRET || '' },
    })
    expect(validateAdminSecret(req)).toBe(false)
  })
})

describe('auth: getClientIp', () => {
  it('parses x-forwarded-for', () => {
    const req = new Request('http://x', {
      headers: { 'x-forwarded-for': '10.0.0.1' },
    })
    expect(getClientIp(req)).toBe('10.0.0.1')
  })

  it('takes the first hop from a comma-separated x-forwarded-for', () => {
    const req = new Request('http://x', {
      headers: { 'x-forwarded-for': '10.0.0.1, 192.168.1.1, 172.16.0.1' },
    })
    expect(getClientIp(req)).toBe('10.0.0.1')
  })

  it('trims whitespace around the first IP', () => {
    const req = new Request('http://x', {
      headers: { 'x-forwarded-for': ' 10.0.0.1 , 192.168.1.1' },
    })
    expect(getClientIp(req)).toBe('10.0.0.1')
  })

  it('returns null when the header is absent', () => {
    const req = new Request('http://x')
    expect(getClientIp(req)).toBeNull()
  })
})

describe('auth: investor JWT roundtrip', () => {
  const payload = {
    sub: 'investor-id',
    email: 'investor@example.com',
    sessionId: 'session-id',
  }

  it('createJwt + verifyJwt roundtrip preserves payload', async () => {
    const jwt = await createJwt(payload)
    const decoded = await verifyJwt(jwt)
    expect(decoded?.sub).toBe(payload.sub)
    expect(decoded?.email).toBe(payload.email)
    expect(decoded?.sessionId).toBe(payload.sessionId)
  })

  it('verifyJwt rejects garbage', async () => {
    expect(await verifyJwt('not-a-jwt')).toBeNull()
  })

  it('verifyJwt rejects a tampered signature', async () => {
    const jwt = await createJwt(payload)
    const tampered = jwt.slice(0, -2) + 'XX'
    expect(await verifyJwt(tampered)).toBeNull()
  })
})
pitch-deck/__tests__/lib/rate-limit.test.ts (Normal file, 83 lines)
@@ -0,0 +1,83 @@
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'
import { checkRateLimit, RATE_LIMITS } from '@/lib/rate-limit'

describe('rate-limit', () => {
  beforeEach(() => {
    vi.useFakeTimers()
  })
  afterEach(() => {
    vi.useRealTimers()
  })

  it('allows the first request', () => {
    const result = checkRateLimit('test-key-1', { limit: 5, windowSec: 60 })
    expect(result.allowed).toBe(true)
    expect(result.remaining).toBe(4)
  })

  it('allows up to the limit, then rejects', () => {
    const key = 'test-key-2'
    const config = { limit: 3, windowSec: 60 }

    expect(checkRateLimit(key, config).allowed).toBe(true)
    expect(checkRateLimit(key, config).allowed).toBe(true)
    expect(checkRateLimit(key, config).allowed).toBe(true)
    expect(checkRateLimit(key, config).allowed).toBe(false)
    expect(checkRateLimit(key, config).allowed).toBe(false)
  })

  it('decrements the remaining counter on each call', () => {
    const key = 'test-key-3'
    const config = { limit: 3, windowSec: 60 }

    expect(checkRateLimit(key, config).remaining).toBe(2)
    expect(checkRateLimit(key, config).remaining).toBe(1)
    expect(checkRateLimit(key, config).remaining).toBe(0)
  })

  it('keys are isolated from each other', () => {
    const config = { limit: 1, windowSec: 60 }
    expect(checkRateLimit('key-a', config).allowed).toBe(true)
    expect(checkRateLimit('key-a', config).allowed).toBe(false)
    // Different key still has its quota
    expect(checkRateLimit('key-b', config).allowed).toBe(true)
  })

  it('resets after the window expires', () => {
    const key = 'test-key-reset'
    const config = { limit: 2, windowSec: 1 }

    expect(checkRateLimit(key, config).allowed).toBe(true)
    expect(checkRateLimit(key, config).allowed).toBe(true)
    expect(checkRateLimit(key, config).allowed).toBe(false)

    // Advance past the window
    vi.advanceTimersByTime(1100)

    expect(checkRateLimit(key, config).allowed).toBe(true)
  })

  it('exposes a sensible resetAt timestamp', () => {
    const before = Date.now()
    const r = checkRateLimit('reset-at-test', { limit: 5, windowSec: 60 })
    expect(r.resetAt).toBeGreaterThanOrEqual(before + 60_000 - 10)
    expect(r.resetAt).toBeLessThanOrEqual(before + 60_000 + 10)
  })

  describe('preset configs', () => {
    it('magicLink: 3 per hour', () => {
      expect(RATE_LIMITS.magicLink.limit).toBe(3)
      expect(RATE_LIMITS.magicLink.windowSec).toBe(3600)
    })

    it('authVerify: 10 per 15 minutes', () => {
      expect(RATE_LIMITS.authVerify.limit).toBe(10)
      expect(RATE_LIMITS.authVerify.windowSec).toBe(900)
    })

    it('chat: 20 per minute', () => {
      expect(RATE_LIMITS.chat.limit).toBe(20)
      expect(RATE_LIMITS.chat.windowSec).toBe(60)
    })
  })
})
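// A minimal in-memory sketch consistent with the behavior these tests pin down
// (an assumption: the real @/lib/rate-limit may differ internally). Fixed window:
// the first hit on a key opens a window, hits inside it decrement `remaining`,
// and once the window expires the counter resets.
type RateLimitConfig = { limit: number; windowSec: number }
type RateLimitResult = { allowed: boolean; remaining: number; resetAt: number }

const buckets = new Map<string, { count: number; resetAt: number }>()

export function checkRateLimitSketch(key: string, config: RateLimitConfig): RateLimitResult {
  const now = Date.now()
  let bucket = buckets.get(key)
  if (!bucket || bucket.resetAt <= now) {
    // New key, or the previous window expired: start a fresh window
    bucket = { count: 0, resetAt: now + config.windowSec * 1000 }
    buckets.set(key, bucket)
  }
  bucket.count++
  return {
    allowed: bucket.count <= config.limit,
    remaining: Math.max(0, config.limit - bucket.count),
    resetAt: bucket.resetAt,
  }
}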
pitch-deck/__tests__/setup.ts (Normal file, 4 lines)
@@ -0,0 +1,4 @@
// Vitest global setup. Required env so the auth modules can initialize.
process.env.PITCH_JWT_SECRET = process.env.PITCH_JWT_SECRET || 'test-secret-do-not-use-in-production-32chars'
process.env.PITCH_ADMIN_SECRET = process.env.PITCH_ADMIN_SECRET || 'test-admin-secret'
process.env.DATABASE_URL = process.env.DATABASE_URL || 'postgres://test:test@localhost:5432/test'
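// For the setup file above to run, vitest has to be pointed at it. A minimal
// vitest.config.ts sketch (an assumption: the repo's actual config may add
// coverage settings or a different alias layout for the '@/…' imports):
import { defineConfig } from 'vitest/config'
import path from 'node:path'

export default defineConfig({
  resolve: {
    // Mirror the '@/lib/…' path alias the tests rely on
    alias: { '@': path.resolve(__dirname, '.') },
  },
  test: {
    setupFiles: ['__tests__/setup.ts'],
  },
})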
pitch-deck/app/api/admin-auth/login/route.ts (Normal file, 62 lines)
@@ -0,0 +1,62 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { verifyPassword, createAdminSession, setAdminCookie, logAdminAudit } from '@/lib/admin-auth'
import { getClientIp } from '@/lib/auth'
import { checkRateLimit, RATE_LIMITS } from '@/lib/rate-limit'

export async function POST(request: NextRequest) {
  const ip = getClientIp(request) || 'unknown'

  // Reuse the auth-verify rate limit (10/IP/15min)
  const rl = checkRateLimit(`admin-login:${ip}`, RATE_LIMITS.authVerify)
  if (!rl.allowed) {
    return NextResponse.json({ error: 'Too many attempts. Try again later.' }, { status: 429 })
  }

  const body = await request.json().catch(() => ({}))
  const email = (body.email || '').trim().toLowerCase()
  const password = body.password || ''

  if (!email || !password) {
    return NextResponse.json({ error: 'Email and password required' }, { status: 400 })
  }

  const { rows } = await pool.query(
    `SELECT id, email, name, password_hash, is_active FROM pitch_admins WHERE email = $1`,
    [email],
  )

  if (rows.length === 0) {
    await logAdminAudit(null, 'admin_login_failed', { email, reason: 'unknown_email' }, request)
    return NextResponse.json({ error: 'Invalid credentials' }, { status: 401 })
  }

  const admin = rows[0]

  if (!admin.is_active) {
    await logAdminAudit(admin.id, 'admin_login_failed', { reason: 'inactive' }, request)
    return NextResponse.json({ error: 'Account disabled' }, { status: 403 })
  }

  const ok = await verifyPassword(password, admin.password_hash)
  if (!ok) {
    await logAdminAudit(admin.id, 'admin_login_failed', { reason: 'wrong_password' }, request)
    return NextResponse.json({ error: 'Invalid credentials' }, { status: 401 })
  }

  const ua = request.headers.get('user-agent')
  const { jwt } = await createAdminSession(admin.id, ip, ua)
  await setAdminCookie(jwt)

  await pool.query(
    `UPDATE pitch_admins SET last_login_at = NOW(), updated_at = NOW() WHERE id = $1`,
    [admin.id],
  )

  await logAdminAudit(admin.id, 'admin_login_success', { email }, request)

  return NextResponse.json({
    success: true,
    admin: { id: admin.id, email: admin.email, name: admin.name },
  })
}
pitch-deck/app/api/admin-auth/logout/route.ts (Normal file, 17 lines)
@@ -0,0 +1,17 @@
import { NextRequest, NextResponse } from 'next/server'
import {
  getAdminPayloadFromCookie,
  revokeAdminSession,
  clearAdminCookie,
  logAdminAudit,
} from '@/lib/admin-auth'

export async function POST(request: NextRequest) {
  const payload = await getAdminPayloadFromCookie()
  if (payload) {
    await revokeAdminSession(payload.sessionId)
    await logAdminAudit(payload.sub, 'admin_logout', {}, request)
  }
  await clearAdminCookie()
  return NextResponse.json({ success: true })
}
pitch-deck/app/api/admin-auth/me/route.ts (Normal file, 10 lines)
@@ -0,0 +1,10 @@
import { NextResponse } from 'next/server'
import { getAdminFromCookie } from '@/lib/admin-auth'

export async function GET() {
  const admin = await getAdminFromCookie()
  if (!admin) {
    return NextResponse.json({ error: 'Not authenticated' }, { status: 401 })
  }
  return NextResponse.json({ admin })
}
pitch-deck/app/api/admin/admins/[id]/route.ts (Normal file, 81 lines)
@@ -0,0 +1,81 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit, hashPassword, revokeAllAdminSessions } from '@/lib/admin-auth'

interface RouteContext {
  params: Promise<{ id: string }>
}

export async function PATCH(request: NextRequest, ctx: RouteContext) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const actorAdminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id } = await ctx.params
  const body = await request.json().catch(() => ({}))
  const { name, is_active, password } = body

  const before = await pool.query(
    `SELECT email, name, is_active FROM pitch_admins WHERE id = $1`,
    [id],
  )
  if (before.rows.length === 0) {
    return NextResponse.json({ error: 'Admin not found' }, { status: 404 })
  }

  const updates: string[] = []
  const params: unknown[] = []
  let p = 1

  if (typeof name === 'string' && name.trim()) {
    updates.push(`name = $${p++}`)
    params.push(name.trim())
  }
  if (typeof is_active === 'boolean') {
    updates.push(`is_active = $${p++}`)
    params.push(is_active)
  }
  if (typeof password === 'string') {
    if (password.length < 12) {
      return NextResponse.json({ error: 'password must be at least 12 characters' }, { status: 400 })
    }
    const hash = await hashPassword(password)
    updates.push(`password_hash = $${p++}`)
    params.push(hash)
  }

  if (updates.length === 0) {
    return NextResponse.json({ error: 'no fields to update' }, { status: 400 })
  }

  updates.push(`updated_at = NOW()`)
  params.push(id)

  const { rows } = await pool.query(
    `UPDATE pitch_admins SET ${updates.join(', ')}
     WHERE id = $${p}
     RETURNING id, email, name, is_active, last_login_at, created_at`,
    params,
  )

  // If deactivated or password changed, revoke their sessions
  if (is_active === false || typeof password === 'string') {
    await revokeAllAdminSessions(id)
  }

  const action = is_active === false ? 'admin_deactivated' : 'admin_edited'
  await logAdminAudit(
    actorAdminId,
    action,
    {
      target_admin_id: id,
      target_email: before.rows[0].email,
      before: before.rows[0],
      after: { name: rows[0].name, is_active: rows[0].is_active },
      password_changed: typeof password === 'string',
    },
    request,
  )

  return NextResponse.json({ admin: rows[0] })
}
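// The guard pattern used throughout these routes suggests a discriminated-union
// return type roughly like this sketch (an assumption: the concrete requireAdmin
// in @/lib/admin-auth may carry more fields). Either the caller gets a verified
// admin, or a ready-to-return 401/403 response to bail out with:
import { NextResponse } from 'next/server'

type AdminGuard =
  | { kind: 'admin'; admin: { id: string; email: string; name: string } }
  | { kind: 'response'; response: NextResponse }

// Typical call site, as seen in every handler above:
//   const guard = await requireAdmin(request)
//   if (guard.kind === 'response') return guard.response
//   const adminId = guard.admin.id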
pitch-deck/app/api/admin/admins/route.ts (Normal file, 52 lines)
@@ -0,0 +1,52 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit, hashPassword } from '@/lib/admin-auth'

export async function GET(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const { rows } = await pool.query(
    `SELECT id, email, name, is_active, last_login_at, created_at, updated_at
     FROM pitch_admins ORDER BY created_at ASC`,
  )
  return NextResponse.json({ admins: rows })
}

export async function POST(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const body = await request.json().catch(() => ({}))
  const email = (body.email || '').trim().toLowerCase()
  const name = (body.name || '').trim()
  const password = body.password || ''

  if (!email || !name || !password) {
    return NextResponse.json({ error: 'email, name, password required' }, { status: 400 })
  }
  if (password.length < 12) {
    return NextResponse.json({ error: 'password must be at least 12 characters' }, { status: 400 })
  }

  const hash = await hashPassword(password)

  try {
    const { rows } = await pool.query(
      `INSERT INTO pitch_admins (email, name, password_hash, is_active)
       VALUES ($1, $2, $3, true)
       RETURNING id, email, name, is_active, created_at`,
      [email, name, hash],
    )
    const newAdmin = rows[0]
    await logAdminAudit(adminId, 'admin_created', { email, name, new_admin_id: newAdmin.id }, request)
    return NextResponse.json({ admin: newAdmin })
  } catch (err) {
    const e = err as { code?: string }
    if (e.code === '23505') {
      return NextResponse.json({ error: 'Email already exists' }, { status: 409 })
    }
    throw err
  }
}
pitch-deck/app/api/admin/audit-logs/route.ts (Normal file, 77 lines)
@@ -0,0 +1,77 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin } from '@/lib/admin-auth'

export async function GET(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const { searchParams } = new URL(request.url)
  const investorId = searchParams.get('investor_id')
  const targetInvestorId = searchParams.get('target_investor_id')
  const adminId = searchParams.get('admin_id')
  const actorType = searchParams.get('actor_type') // 'admin' | 'investor'
  const action = searchParams.get('action')
  const since = searchParams.get('since') // ISO date
  const until = searchParams.get('until')
  const limit = Math.min(parseInt(searchParams.get('limit') || '100'), 500)
  const offset = parseInt(searchParams.get('offset') || '0')

  const conditions: string[] = []
  const params: unknown[] = []
  let p = 1

  if (investorId) {
    conditions.push(`a.investor_id = $${p++}`)
    params.push(investorId)
  }
  if (targetInvestorId) {
    conditions.push(`a.target_investor_id = $${p++}`)
    params.push(targetInvestorId)
  }
  if (adminId) {
    conditions.push(`a.admin_id = $${p++}`)
    params.push(adminId)
  }
  if (actorType === 'admin') {
    conditions.push(`a.admin_id IS NOT NULL`)
  } else if (actorType === 'investor') {
    conditions.push(`a.investor_id IS NOT NULL`)
  }
  if (action) {
    conditions.push(`a.action = $${p++}`)
    params.push(action)
  }
  if (since) {
    conditions.push(`a.created_at >= $${p++}`)
    params.push(since)
  }
  if (until) {
    conditions.push(`a.created_at <= $${p++}`)
    params.push(until)
  }

  const where = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : ''

  const { rows } = await pool.query(
    `SELECT a.*,
            i.email AS investor_email, i.name AS investor_name,
            ti.email AS target_investor_email, ti.name AS target_investor_name,
            ad.email AS admin_email, ad.name AS admin_name
     FROM pitch_audit_logs a
     LEFT JOIN pitch_investors i ON i.id = a.investor_id
     LEFT JOIN pitch_investors ti ON ti.id = a.target_investor_id
     LEFT JOIN pitch_admins ad ON ad.id = a.admin_id
     ${where}
     ORDER BY a.created_at DESC
     LIMIT $${p++} OFFSET $${p++}`,
    [...params, limit, offset],
  )

  const totalRes = await pool.query(
    `SELECT COUNT(*)::int AS total FROM pitch_audit_logs a ${where}`,
    params,
  )

  return NextResponse.json({ logs: rows, total: totalRes.rows[0].total })
}
pitch-deck/app/api/admin/dashboard/route.ts (Normal file, 46 lines)
@@ -0,0 +1,46 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin } from '@/lib/admin-auth'

export async function GET(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const [totals, recentLogins, recentActivity] = await Promise.all([
    pool.query(`
      SELECT
        (SELECT COUNT(*)::int FROM pitch_investors) AS total_investors,
        (SELECT COUNT(*)::int FROM pitch_investors WHERE status = 'invited') AS pending_invites,
        (SELECT COUNT(*)::int FROM pitch_investors WHERE last_login_at >= NOW() - INTERVAL '7 days') AS active_7d,
        (SELECT COUNT(*)::int FROM pitch_audit_logs WHERE action = 'slide_viewed') AS slides_viewed_total,
        (SELECT COUNT(*)::int FROM pitch_sessions WHERE revoked = false AND expires_at > NOW()) AS active_sessions,
        (SELECT COUNT(*)::int FROM pitch_admins WHERE is_active = true) AS active_admins
    `),
    pool.query(`
      SELECT a.created_at, a.ip_address, i.id AS investor_id, i.email, i.name, i.company
      FROM pitch_audit_logs a
      JOIN pitch_investors i ON i.id = a.investor_id
      WHERE a.action = 'login_success'
      ORDER BY a.created_at DESC
      LIMIT 10
    `),
    pool.query(`
      SELECT a.id, a.action, a.created_at, a.details,
             i.email AS investor_email, i.name AS investor_name,
             ti.email AS target_investor_email,
             ad.email AS admin_email, ad.name AS admin_name
      FROM pitch_audit_logs a
      LEFT JOIN pitch_investors i ON i.id = a.investor_id
      LEFT JOIN pitch_investors ti ON ti.id = a.target_investor_id
      LEFT JOIN pitch_admins ad ON ad.id = a.admin_id
      ORDER BY a.created_at DESC
      LIMIT 15
    `),
  ])

  return NextResponse.json({
    totals: totals.rows[0],
    recent_logins: recentLogins.rows,
    recent_activity: recentActivity.rows,
  })
}
pitch-deck/app/api/admin/fm/assumptions/[id]/route.ts (Normal file, 93 lines)
@@ -0,0 +1,93 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'

interface RouteContext {
  params: Promise<{ id: string }>
}

export async function PATCH(request: NextRequest, ctx: RouteContext) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id } = await ctx.params
  const body = await request.json().catch(() => ({}))
  const { value, min_value, max_value, step_size, label_de, label_en } = body

  const before = await pool.query(
    `SELECT scenario_id, key, label_de, label_en, value, min_value, max_value, step_size
     FROM pitch_fm_assumptions WHERE id = $1`,
    [id],
  )
  if (before.rows.length === 0) {
    return NextResponse.json({ error: 'Assumption not found' }, { status: 404 })
  }

  const updates: string[] = []
  const params: unknown[] = []
  let p = 1

  if (value !== undefined) {
    updates.push(`value = $${p++}`)
    params.push(JSON.stringify(value))
  }
  if (min_value !== undefined) {
    updates.push(`min_value = $${p++}`)
    params.push(min_value)
  }
  if (max_value !== undefined) {
    updates.push(`max_value = $${p++}`)
    params.push(max_value)
  }
  if (step_size !== undefined) {
    updates.push(`step_size = $${p++}`)
    params.push(step_size)
  }
  if (typeof label_de === 'string') {
    updates.push(`label_de = $${p++}`)
    params.push(label_de)
  }
  if (typeof label_en === 'string') {
    updates.push(`label_en = $${p++}`)
    params.push(label_en)
  }

  if (updates.length === 0) {
    return NextResponse.json({ error: 'no fields to update' }, { status: 400 })
  }

  params.push(id)
  const { rows } = await pool.query(
    `UPDATE pitch_fm_assumptions SET ${updates.join(', ')} WHERE id = $${p} RETURNING *`,
    params,
  )

  // Invalidate cached results for this scenario so the next compute uses the new value
  await pool.query(`DELETE FROM pitch_fm_results WHERE scenario_id = $1`, [before.rows[0].scenario_id])

  await logAdminAudit(
    adminId,
    'assumption_edited',
    {
      assumption_id: id,
      scenario_id: before.rows[0].scenario_id,
      key: before.rows[0].key,
      before: {
        value: typeof before.rows[0].value === 'string' ? JSON.parse(before.rows[0].value) : before.rows[0].value,
        min_value: before.rows[0].min_value,
        max_value: before.rows[0].max_value,
        step_size: before.rows[0].step_size,
      },
      after: {
        value: typeof rows[0].value === 'string' ? JSON.parse(rows[0].value) : rows[0].value,
        min_value: rows[0].min_value,
        max_value: rows[0].max_value,
        step_size: rows[0].step_size,
      },
    },
    request,
  )

  return NextResponse.json({ assumption: rows[0] })
}
pitch-deck/app/api/admin/fm/scenarios/[id]/route.ts (Normal file, 52 lines)
@@ -0,0 +1,52 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'

interface RouteContext {
  params: Promise<{ id: string }>
}

export async function PATCH(request: NextRequest, ctx: RouteContext) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id } = await ctx.params
  const body = await request.json().catch(() => ({}))
  const { name, description, color } = body

  if (name === undefined && description === undefined && color === undefined) {
    return NextResponse.json({ error: 'name, description, or color required' }, { status: 400 })
  }

  const before = await pool.query(
    `SELECT name, description, color FROM pitch_fm_scenarios WHERE id = $1`,
    [id],
  )
  if (before.rows.length === 0) {
    return NextResponse.json({ error: 'Scenario not found' }, { status: 404 })
  }

  const { rows } = await pool.query(
    `UPDATE pitch_fm_scenarios SET
       name = COALESCE($1, name),
       description = COALESCE($2, description),
       color = COALESCE($3, color)
     WHERE id = $4
     RETURNING *`,
    [name ?? null, description ?? null, color ?? null, id],
  )

  await logAdminAudit(
    adminId,
    'scenario_edited',
    {
      scenario_id: id,
      before: before.rows[0],
      after: { name: rows[0].name, description: rows[0].description, color: rows[0].color },
    },
    request,
  )

  return NextResponse.json({ scenario: rows[0] })
}
pitch-deck/app/api/admin/fm/scenarios/route.ts (Normal file, 27 lines)
@@ -0,0 +1,27 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin } from '@/lib/admin-auth'

export async function GET(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const scenarios = await pool.query(
    `SELECT * FROM pitch_fm_scenarios ORDER BY is_default DESC, name`,
  )
  const assumptions = await pool.query(
    `SELECT * FROM pitch_fm_assumptions ORDER BY scenario_id, sort_order`,
  )

  const result = scenarios.rows.map(s => ({
    ...s,
    assumptions: assumptions.rows
      .filter(a => a.scenario_id === s.id)
      .map(a => ({
        ...a,
        value: typeof a.value === 'string' ? JSON.parse(a.value) : a.value,
      })),
  }))

  return NextResponse.json({ scenarios: result })
}
pitch-deck/app/api/admin/import-fp/route.ts (Normal file, 63 lines)
@@ -0,0 +1,63 @@
import { NextRequest, NextResponse } from 'next/server'
import { requireAdmin } from '@/lib/admin-auth'
import pool from '@/lib/db'

// POST: Import finanzplan data (all fp_* tables) from JSON dump
export async function POST(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  try {
    const data = await request.json()
    const results: string[] = []
    const client = await pool.connect()

    try {
      await client.query('BEGIN')

      const tables = [
        'fp_scenarios', 'fp_kunden', 'fp_kunden_summary', 'fp_umsatzerloese',
        'fp_materialaufwand', 'fp_personalkosten', 'fp_betriebliche_aufwendungen',
        'fp_investitionen', 'fp_sonst_ertraege', 'fp_liquiditaet', 'fp_guv',
      ]

      for (const table of tables) {
        const rows = data[table]
        if (!rows || !Array.isArray(rows) || rows.length === 0) {
          results.push(`SKIP: ${table} (no data)`)
          continue
        }

        // Clear existing data
        await client.query(`DELETE FROM ${table}`)

        // Insert rows
        const cols = Object.keys(rows[0])
        const colNames = cols.join(', ')

        for (const row of rows) {
          const values = cols.map(c => {
            const v = row[c]
            if (v === null || v === undefined) return null
            if (typeof v === 'object') return JSON.stringify(v)
            return v
          })
          const placeholders = values.map((_, i) => `$${i + 1}`).join(', ')
          await client.query(`INSERT INTO ${table} (${colNames}) VALUES (${placeholders})`, values)
        }

        results.push(`OK: ${table} — ${rows.length} rows`)
      }

      await client.query('COMMIT')
      return NextResponse.json({ success: true, results })
    } catch (err) {
      await client.query('ROLLBACK')
      throw err
    } finally {
      client.release()
    }
  } catch (error) {
    return NextResponse.json({ error: String(error) }, { status: 500 })
  }
}
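// Shape of the JSON dump this importer expects: one array of row objects per
// fp_* table, keyed by table name, with keys matching the table's column names.
// Illustrative values only (an assumption: the real export format is defined by
// whatever tool produces the dump):
const exampleDump = {
  fp_scenarios: [
    {
      id: '00000000-0000-0000-0000-000000000001',
      name: 'Base Case',
      description: 'Basisdaten aus Excel-Import',
      is_default: true,
    },
  ],
  fp_personalkosten: [
    // Object-typed values (like the JSONB columns) are JSON.stringify'd on insert
    { person_name: 'CTO', brutto_monthly: 8500.0, values_brutto: { '2025-01': 8500 }, sort_order: 1 },
  ],
  // Tables that are absent or empty are skipped with a "SKIP: <table>" result line.
}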
pitch-deck/app/api/admin/investors/[id]/resend/route.ts (Normal file, 60 lines)
@@ -0,0 +1,60 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { generateToken } from '@/lib/auth'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'
import { sendMagicLinkEmail } from '@/lib/email'
import { checkRateLimit, RATE_LIMITS } from '@/lib/rate-limit'

interface RouteContext {
  params: Promise<{ id: string }>
}

export async function POST(request: NextRequest, ctx: RouteContext) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id } = await ctx.params

  const { rows } = await pool.query(
    `SELECT id, email, name, status FROM pitch_investors WHERE id = $1`,
    [id],
  )
  if (rows.length === 0) {
    return NextResponse.json({ error: 'Investor not found' }, { status: 404 })
  }

  const investor = rows[0]
  if (investor.status === 'revoked') {
    return NextResponse.json({ error: 'Investor is revoked. Reactivate first by re-inviting.' }, { status: 400 })
  }

  // Rate limit by email
  const rl = checkRateLimit(`magic-link:${investor.email}`, RATE_LIMITS.magicLink)
  if (!rl.allowed) {
    return NextResponse.json({ error: 'Too many resends for this email. Try again later.' }, { status: 429 })
  }

  const token = generateToken()
  const ttlHours = parseInt(process.env.MAGIC_LINK_TTL_HOURS || '72')
  const expiresAt = new Date(Date.now() + ttlHours * 60 * 60 * 1000)

  await pool.query(
    `INSERT INTO pitch_magic_links (investor_id, token, expires_at) VALUES ($1, $2, $3)`,
    [investor.id, token, expiresAt],
  )

  const baseUrl = process.env.PITCH_BASE_URL || 'https://pitch.breakpilot.ai'
  const magicLinkUrl = `${baseUrl}/auth/verify?token=${token}`
  await sendMagicLinkEmail(investor.email, investor.name, magicLinkUrl)

  await logAdminAudit(
    adminId,
    'magic_link_resent',
    { email: investor.email, expires_at: expiresAt.toISOString() },
    request,
    investor.id,
  )

  return NextResponse.json({ success: true, expires_at: expiresAt.toISOString() })
}
pitch-deck/app/api/admin/investors/[id]/route.ts (Normal file, 125 lines)
@@ -0,0 +1,125 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'

interface RouteContext {
  params: Promise<{ id: string }>
}

export async function GET(request: NextRequest, ctx: RouteContext) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const { id } = await ctx.params

  const [investor, sessions, snapshots, audit] = await Promise.all([
    pool.query(
      `SELECT i.id, i.email, i.name, i.company, i.status, i.last_login_at, i.login_count,
              i.created_at, i.updated_at, i.assigned_version_id,
              v.name AS version_name, v.status AS version_status
       FROM pitch_investors i
       LEFT JOIN pitch_versions v ON v.id = i.assigned_version_id
       WHERE i.id = $1`,
      [id],
    ),
    pool.query(
      `SELECT id, ip_address, user_agent, expires_at, revoked, created_at
       FROM pitch_sessions WHERE investor_id = $1
       ORDER BY created_at DESC LIMIT 50`,
      [id],
    ),
    pool.query(
      `SELECT id, scenario_id, label, is_latest, created_at
       FROM pitch_investor_snapshots WHERE investor_id = $1
       ORDER BY created_at DESC LIMIT 50`,
      [id],
    ),
    pool.query(
      `SELECT a.id, a.action, a.created_at, a.details, a.ip_address, a.slide_id,
              ad.email AS admin_email, ad.name AS admin_name
       FROM pitch_audit_logs a
       LEFT JOIN pitch_admins ad ON ad.id = a.admin_id
       WHERE a.investor_id = $1 OR a.target_investor_id = $1
       ORDER BY a.created_at DESC LIMIT 100`,
      [id],
    ),
  ])

  if (investor.rows.length === 0) {
    return NextResponse.json({ error: 'Investor not found' }, { status: 404 })
  }

  return NextResponse.json({
    investor: investor.rows[0],
    sessions: sessions.rows,
    snapshots: snapshots.rows,
    audit: audit.rows,
  })
}

export async function PATCH(request: NextRequest, ctx: RouteContext) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id } = await ctx.params
  const body = await request.json().catch(() => ({}))
  const { name, company, assigned_version_id } = body

  if (name === undefined && company === undefined && assigned_version_id === undefined) {
    return NextResponse.json({ error: 'name, company, or assigned_version_id required' }, { status: 400 })
  }

  const before = await pool.query(
    `SELECT name, company, assigned_version_id FROM pitch_investors WHERE id = $1`,
    [id],
  )
  if (before.rows.length === 0) {
    return NextResponse.json({ error: 'Investor not found' }, { status: 404 })
  }

  // Validate version exists and is committed (if assigning)
  if (assigned_version_id !== undefined && assigned_version_id !== null) {
    const ver = await pool.query(
      `SELECT id, status FROM pitch_versions WHERE id = $1`,
      [assigned_version_id],
    )
    if (ver.rows.length === 0) {
      return NextResponse.json({ error: 'Version not found' }, { status: 404 })
    }
    if (ver.rows[0].status !== 'committed') {
      return NextResponse.json({ error: 'Can only assign committed versions' }, { status: 400 })
    }
  }

  // Use null to clear version assignment, undefined to leave unchanged
  const versionValue = assigned_version_id === undefined ? before.rows[0].assigned_version_id : (assigned_version_id || null)

  const { rows } = await pool.query(
    `UPDATE pitch_investors SET
       name = COALESCE($1, name),
       company = COALESCE($2, company),
       assigned_version_id = $4,
       updated_at = NOW()
     WHERE id = $3
     RETURNING id, email, name, company, status, assigned_version_id`,
    [name ?? null, company ?? null, id, versionValue],
  )

  const action = assigned_version_id !== undefined && assigned_version_id !== before.rows[0].assigned_version_id
    ? 'investor_version_assigned'
    : 'investor_edited'

  await logAdminAudit(
    adminId,
    action,
    {
      before: before.rows[0],
      after: { name: rows[0].name, company: rows[0].company, assigned_version_id: rows[0].assigned_version_id },
    },
    request,
    id,
  )

  return NextResponse.json({ investor: rows[0] })
}
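// How the null/undefined distinction in PATCH plays out for assigned_version_id
// (illustrative request bodies; the version id value is an assumption):
//
//   { "name": "New Name" }                   → assignment left unchanged
//   { "assigned_version_id": null }          → assignment cleared
//   { "assigned_version_id": "some-ver-id" } → reassigned (must be a committed version)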
pitch-deck/app/api/admin/investors/route.ts (Normal file, 20 lines)
@@ -0,0 +1,20 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin } from '@/lib/admin-auth'

export async function GET(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const { rows } = await pool.query(
    `SELECT i.id, i.email, i.name, i.company, i.status, i.last_login_at, i.login_count, i.created_at,
            i.assigned_version_id, v.name AS version_name,
            (SELECT COUNT(*) FROM pitch_audit_logs a WHERE a.investor_id = i.id AND a.action = 'slide_viewed') as slides_viewed,
            (SELECT MAX(a.created_at) FROM pitch_audit_logs a WHERE a.investor_id = i.id) as last_activity
     FROM pitch_investors i
     LEFT JOIN pitch_versions v ON v.id = i.assigned_version_id
     ORDER BY i.created_at DESC`,
  )

  return NextResponse.json({ investors: rows })
}
pitch-deck/app/api/admin/invite/route.ts (Normal file, 73 lines)
@@ -0,0 +1,73 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { generateToken } from '@/lib/auth'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'
import { sendMagicLinkEmail } from '@/lib/email'
import { checkRateLimit, RATE_LIMITS } from '@/lib/rate-limit'

export async function POST(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const body = await request.json().catch(() => ({}))
  const { email, name, company, greeting, message, closing } = body

  if (!email || typeof email !== 'string') {
    return NextResponse.json({ error: 'Email required' }, { status: 400 })
  }

  const normalizedEmail = email.toLowerCase().trim()

  // Rate limit by email (3/hour), keyed on the normalized address so whitespace
  // padding cannot sidestep the limit
  const rl = checkRateLimit(`magic-link:${normalizedEmail}`, RATE_LIMITS.magicLink)
  if (!rl.allowed) {
    return NextResponse.json({ error: 'Too many invites for this email. Try again later.' }, { status: 429 })
  }

  // Upsert investor
  const { rows } = await pool.query(
    `INSERT INTO pitch_investors (email, name, company)
     VALUES ($1, $2, $3)
     ON CONFLICT (email) DO UPDATE SET
       name = COALESCE(EXCLUDED.name, pitch_investors.name),
       company = COALESCE(EXCLUDED.company, pitch_investors.company),
       status = CASE WHEN pitch_investors.status = 'revoked' THEN 'invited' ELSE pitch_investors.status END,
       updated_at = NOW()
     RETURNING id, status`,
    [normalizedEmail, name || null, company || null],
  )

  const investor = rows[0]

  // Generate magic link
  const token = generateToken()
  const ttlHours = parseInt(process.env.MAGIC_LINK_TTL_HOURS || '72')
  const expiresAt = new Date(Date.now() + ttlHours * 60 * 60 * 1000)

  await pool.query(
    `INSERT INTO pitch_magic_links (investor_id, token, expires_at)
     VALUES ($1, $2, $3)`,
    [investor.id, token, expiresAt],
  )

  const baseUrl = process.env.PITCH_BASE_URL || 'https://pitch.breakpilot.ai'
  const magicLinkUrl = `${baseUrl}/auth/verify?token=${token}`

  await sendMagicLinkEmail(normalizedEmail, name || null, magicLinkUrl, greeting, message, closing)

  await logAdminAudit(
    adminId,
    'investor_invited',
    { email: normalizedEmail, name: name || null, company: company || null, expires_at: expiresAt.toISOString() },
    request,
    investor.id,
  )

  return NextResponse.json({
    success: true,
    investor_id: investor.id,
    email: normalizedEmail,
    expires_at: expiresAt.toISOString(),
  })
}
pitch-deck/app/api/admin/migrate/route.ts (Normal file, 128 lines)
@@ -0,0 +1,128 @@
import { NextRequest, NextResponse } from 'next/server'
import { requireAdmin } from '@/lib/admin-auth'
import pool from '@/lib/db'

export async function POST(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const results: string[] = []

  // Finanzplan tables — the ones missing on production
  const statements = [
    `CREATE TABLE IF NOT EXISTS fp_scenarios (
      id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
      name TEXT NOT NULL DEFAULT 'Base Case',
      description TEXT,
      is_default BOOLEAN DEFAULT false,
      created_at TIMESTAMPTZ DEFAULT NOW(),
      updated_at TIMESTAMPTZ DEFAULT NOW()
    )`,
    `INSERT INTO fp_scenarios (name, description, is_default)
     SELECT 'Base Case', 'Basisdaten aus Excel-Import', true
     WHERE NOT EXISTS (SELECT 1 FROM fp_scenarios WHERE is_default = true)`,
    `CREATE TABLE IF NOT EXISTS fp_kunden (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      segment_name TEXT NOT NULL, segment_index INT NOT NULL, row_label TEXT NOT NULL, row_index INT NOT NULL,
      percentage NUMERIC(5,3), formula_type TEXT, is_editable BOOLEAN DEFAULT false,
      values JSONB NOT NULL DEFAULT '{}', excel_row INT, sort_order INT NOT NULL,
      created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
    )`,
    `CREATE TABLE IF NOT EXISTS fp_kunden_summary (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      row_label TEXT NOT NULL, row_index INT NOT NULL, values JSONB NOT NULL DEFAULT '{}',
      excel_row INT, sort_order INT NOT NULL
    )`,
    `CREATE TABLE IF NOT EXISTS fp_umsatzerloese (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      section TEXT NOT NULL, row_label TEXT NOT NULL, row_index INT NOT NULL,
      is_editable BOOLEAN DEFAULT false, values JSONB NOT NULL DEFAULT '{}',
      excel_row INT, sort_order INT NOT NULL,
      created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
    )`,
    `CREATE TABLE IF NOT EXISTS fp_materialaufwand (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      section TEXT NOT NULL, row_label TEXT NOT NULL, row_index INT NOT NULL,
      is_editable BOOLEAN DEFAULT false, values JSONB NOT NULL DEFAULT '{}',
      excel_row INT, sort_order INT NOT NULL,
      created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
    )`,
    `CREATE TABLE IF NOT EXISTS fp_personalkosten (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      person_name TEXT NOT NULL, person_nr TEXT, position TEXT,
      start_date DATE, end_date DATE, brutto_monthly NUMERIC(10,2),
      annual_raise_pct NUMERIC(5,2) DEFAULT 3.0, ag_sozial_pct NUMERIC(5,2) DEFAULT 20.425,
      is_editable BOOLEAN DEFAULT true,
      values_brutto JSONB NOT NULL DEFAULT '{}', values_sozial JSONB NOT NULL DEFAULT '{}',
      values_total JSONB NOT NULL DEFAULT '{}',
      excel_row INT, sort_order INT NOT NULL,
      created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
    )`,
    `CREATE TABLE IF NOT EXISTS fp_betriebliche_aufwendungen (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      category TEXT NOT NULL, row_label TEXT NOT NULL, row_index INT NOT NULL,
      is_editable BOOLEAN DEFAULT true, is_sum_row BOOLEAN DEFAULT false, formula_desc TEXT,
      values JSONB NOT NULL DEFAULT '{}', excel_row INT, sort_order INT NOT NULL,
      created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
    )`,
    `CREATE TABLE IF NOT EXISTS fp_investitionen (
      id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
      item_name TEXT NOT NULL, category TEXT, purchase_amount NUMERIC(12,2) NOT NULL,
      purchase_date DATE, afa_years INT, afa_end_date DATE, is_editable BOOLEAN DEFAULT true,
|
||||
values_invest JSONB NOT NULL DEFAULT '{}', values_afa JSONB NOT NULL DEFAULT '{}',
|
||||
excel_row INT, sort_order INT NOT NULL,
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
|
||||
)`,
|
||||
`CREATE TABLE IF NOT EXISTS fp_sonst_ertraege (
|
||||
id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
|
||||
category TEXT NOT NULL, row_label TEXT, row_index INT NOT NULL,
|
||||
is_editable BOOLEAN DEFAULT true, is_sum_row BOOLEAN DEFAULT false,
|
||||
values JSONB NOT NULL DEFAULT '{}', excel_row INT, sort_order INT NOT NULL,
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
|
||||
)`,
|
||||
`CREATE TABLE IF NOT EXISTS fp_liquiditaet (
|
||||
id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
|
||||
row_label TEXT NOT NULL, row_type TEXT NOT NULL,
|
||||
is_editable BOOLEAN DEFAULT false, formula_desc TEXT,
|
||||
values JSONB NOT NULL DEFAULT '{}', excel_row INT, sort_order INT NOT NULL,
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
|
||||
)`,
|
||||
`CREATE TABLE IF NOT EXISTS fp_guv (
|
||||
id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
|
||||
row_label TEXT NOT NULL, row_index INT NOT NULL,
|
||||
is_sum_row BOOLEAN DEFAULT false, formula_desc TEXT,
|
||||
values JSONB NOT NULL DEFAULT '{}', excel_row INT, sort_order INT NOT NULL,
|
||||
created_at TIMESTAMPTZ DEFAULT NOW(), updated_at TIMESTAMPTZ DEFAULT NOW()
|
||||
)`,
|
||||
`CREATE TABLE IF NOT EXISTS fp_cell_overrides (
|
||||
id SERIAL PRIMARY KEY, scenario_id UUID REFERENCES fp_scenarios(id) ON DELETE CASCADE,
|
||||
sheet_name TEXT NOT NULL, row_id INT NOT NULL, month_key TEXT NOT NULL,
|
||||
override_value NUMERIC, created_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
UNIQUE(scenario_id, sheet_name, row_id, month_key)
|
||||
)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_kunden_scenario ON fp_kunden(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_kunden_summary_scenario ON fp_kunden_summary(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_umsatz_scenario ON fp_umsatzerloese(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_material_scenario ON fp_materialaufwand(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_personal_scenario ON fp_personalkosten(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_betrieb_scenario ON fp_betriebliche_aufwendungen(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_invest_scenario ON fp_investitionen(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_sonst_scenario ON fp_sonst_ertraege(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_liquid_scenario ON fp_liquiditaet(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_guv_scenario ON fp_guv(scenario_id)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_fp_overrides_lookup ON fp_cell_overrides(scenario_id, sheet_name, row_id)`,
|
||||
]
|
||||
|
||||
for (const sql of statements) {
|
||||
try {
|
||||
await pool.query(sql)
|
||||
const label = sql.substring(0, 60).replace(/\s+/g, ' ')
|
||||
results.push(`OK: ${label}...`)
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err)
|
||||
results.push(`ERROR: ${msg}`)
|
||||
}
|
||||
}
|
||||
|
||||
return NextResponse.json({ success: true, results })
|
||||
}
|
||||
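Because every statement uses IF NOT EXISTS or a WHERE NOT EXISTS guard, the endpoint is idempotent and safe to re-run. A minimal sketch of how the result could be verified afterwards, using only the fp_ table names from the statements above and a standard information_schema query (the helper name is hypothetical):

// Sketch: list the fp_* tables present after calling the migrate endpoint.
import pool from '@/lib/db'

export async function listFinanzplanTables(): Promise<string[]> {
  const { rows } = await pool.query(
    `SELECT table_name FROM information_schema.tables
     WHERE table_schema = 'public' AND table_name LIKE 'fp_%'
     ORDER BY table_name`,
  )
  return rows.map(r => r.table_name)
}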
32
pitch-deck/app/api/admin/revoke/route.ts
Normal file
@@ -0,0 +1,32 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { revokeAllSessions } from '@/lib/auth'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'

export async function POST(request: NextRequest) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const body = await request.json().catch(() => ({}))
  const { investor_id } = body

  if (!investor_id) {
    return NextResponse.json({ error: 'investor_id required' }, { status: 400 })
  }

  const { rows } = await pool.query(
    `UPDATE pitch_investors SET status = 'revoked', updated_at = NOW()
     WHERE id = $1 RETURNING email`,
    [investor_id],
  )

  if (rows.length === 0) {
    return NextResponse.json({ error: 'Investor not found' }, { status: 404 })
  }

  await revokeAllSessions(investor_id)
  await logAdminAudit(adminId, 'investor_revoked', { email: rows[0].email }, request, investor_id)

  return NextResponse.json({ success: true })
}
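The route delegates session invalidation to revokeAllSessions from '@/lib/auth', which is not part of this diff. A plausible sketch of its shape, assuming sessions are stored in a pitch_sessions table keyed by investor_id (both the table and column names are guesses):

// Hypothetical sketch of revokeAllSessions; pitch_sessions is an assumed table name.
import pool from '@/lib/db'

export async function revokeAllSessions(investorId: string): Promise<number> {
  const result = await pool.query(
    `DELETE FROM pitch_sessions WHERE investor_id = $1`,
    [investorId],
  )
  return result.rowCount ?? 0 // number of sessions invalidated
}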
31
pitch-deck/app/api/admin/versions/[id]/commit/route.ts
Normal file
@@ -0,0 +1,31 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'

interface Ctx { params: Promise<{ id: string }> }

export async function POST(request: NextRequest, ctx: Ctx) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id } = await ctx.params

  const ver = await pool.query(`SELECT status, name FROM pitch_versions WHERE id = $1`, [id])
  if (ver.rows.length === 0) return NextResponse.json({ error: 'Not found' }, { status: 404 })
  if (ver.rows[0].status === 'committed') {
    return NextResponse.json({ error: 'Already committed' }, { status: 400 })
  }

  const { rows } = await pool.query(
    `UPDATE pitch_versions SET status = 'committed', committed_at = NOW() WHERE id = $1 RETURNING *`,
    [id],
  )

  await logAdminAudit(adminId, 'version_committed', {
    version_id: id,
    name: rows[0].name,
  }, request)

  return NextResponse.json({ version: rows[0] })
}
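The commit route and the data route below together imply a small versioning schema: pitch_versions rows move from draft to committed, and pitch_version_data holds one JSONB blob per (version, table) pair. A DDL sketch inferred only from the columns these routes actually query; the real production schema may differ, and the updated_by type is a guess:

// Sketch inferred from the queries in the version routes; actual DDL may differ.
const versioningStatements = [
  `CREATE TABLE IF NOT EXISTS pitch_versions (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'draft',
    created_at TIMESTAMPTZ DEFAULT NOW(),
    committed_at TIMESTAMPTZ
  )`,
  // The UNIQUE constraint is required by the ON CONFLICT (version_id, table_name)
  // upsert in the data route's PUT handler.
  `CREATE TABLE IF NOT EXISTS pitch_version_data (
    version_id UUID REFERENCES pitch_versions(id) ON DELETE CASCADE,
    table_name TEXT NOT NULL,
    data JSONB NOT NULL DEFAULT '[]',
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    updated_by TEXT,
    UNIQUE (version_id, table_name)
  )`,
]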
@@ -0,0 +1,73 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin, logAdminAudit } from '@/lib/admin-auth'
import { VERSION_TABLES, VersionTableName } from '@/lib/version-helpers'

interface Ctx { params: Promise<{ id: string; tableName: string }> }

export async function GET(request: NextRequest, ctx: Ctx) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const { id, tableName } = await ctx.params

  if (!VERSION_TABLES.includes(tableName as VersionTableName)) {
    return NextResponse.json({ error: `Invalid table: ${tableName}` }, { status: 400 })
  }

  const { rows } = await pool.query(
    `SELECT data, updated_at, updated_by FROM pitch_version_data
     WHERE version_id = $1 AND table_name = $2`,
    [id, tableName],
  )

  if (rows.length === 0) {
    return NextResponse.json({ data: [], updated_at: null })
  }

  const data = typeof rows[0].data === 'string' ? JSON.parse(rows[0].data) : rows[0].data
  return NextResponse.json({ data, updated_at: rows[0].updated_at })
}

export async function PUT(request: NextRequest, ctx: Ctx) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response
  const adminId = guard.kind === 'admin' ? guard.admin.id : null

  const { id, tableName } = await ctx.params

  if (!VERSION_TABLES.includes(tableName as VersionTableName)) {
    return NextResponse.json({ error: `Invalid table: ${tableName}` }, { status: 400 })
  }

  // Verify version is a draft
  const ver = await pool.query(`SELECT status FROM pitch_versions WHERE id = $1`, [id])
  if (ver.rows.length === 0) return NextResponse.json({ error: 'Version not found' }, { status: 404 })
  if (ver.rows[0].status === 'committed') {
    return NextResponse.json({ error: 'Cannot edit a committed version' }, { status: 400 })
  }

  const body = await request.json().catch(() => ({}))
  const { data } = body
  // Reject null explicitly: typeof null === 'object', so without this check
  // a null payload would slip through and be stored as [null].
  if (data === null || (!Array.isArray(data) && typeof data !== 'object')) {
    return NextResponse.json({ error: 'data must be an array or object' }, { status: 400 })
  }

  // Wrap single-record tables in an array for consistency
  const normalizedData = Array.isArray(data) ? data : [data]

  await pool.query(
    `INSERT INTO pitch_version_data (version_id, table_name, data, updated_by)
     VALUES ($1, $2, $3, $4)
     ON CONFLICT (version_id, table_name) DO UPDATE SET
       data = $3, updated_at = NOW(), updated_by = $4`,
    [id, tableName, JSON.stringify(normalizedData), adminId],
  )

  await logAdminAudit(adminId, 'version_data_edited', {
    version_id: id,
    table_name: tableName,
  }, request)

  return NextResponse.json({ success: true })
}
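Both version routes lean on '@/lib/version-helpers' for the table whitelist and bulk loading; that module is not included in this diff. A minimal sketch consistent with how it is consumed here. Only VERSION_TABLES, VersionTableName, and loadVersionData(id) are actually referenced; the concrete table names are placeholders:

// Hypothetical shape of '@/lib/version-helpers'; the table names are placeholders.
import pool from '@/lib/db'

export const VERSION_TABLES = ['slides', 'metrics', 'team'] as const
export type VersionTableName = (typeof VERSION_TABLES)[number]

// Load every stored table for a version into one keyed record.
export async function loadVersionData(versionId: string): Promise<Record<string, unknown[]>> {
  const { rows } = await pool.query(
    `SELECT table_name, data FROM pitch_version_data WHERE version_id = $1`,
    [versionId],
  )
  const out: Record<string, unknown[]> = {}
  for (const row of rows) {
    out[row.table_name] = typeof row.data === 'string' ? JSON.parse(row.data) : row.data
  }
  return out
}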
@@ -0,0 +1,39 @@
import { NextRequest, NextResponse } from 'next/server'
import pool from '@/lib/db'
import { requireAdmin } from '@/lib/admin-auth'
import { loadVersionData, VERSION_TABLES } from '@/lib/version-helpers'
import { diffTable } from '@/lib/version-diff'

interface Ctx { params: Promise<{ id: string; otherId: string }> }

export async function GET(request: NextRequest, ctx: Ctx) {
  const guard = await requireAdmin(request)
  if (guard.kind === 'response') return guard.response

  const { id, otherId } = await ctx.params

  // Verify both versions exist
  const [vA, vB] = await Promise.all([
    pool.query(`SELECT id, name, status, created_at FROM pitch_versions WHERE id = $1`, [id]),
    pool.query(`SELECT id, name, status, created_at FROM pitch_versions WHERE id = $1`, [otherId]),
  ])
  if (vA.rows.length === 0 || vB.rows.length === 0) {
    return NextResponse.json({ error: 'One or both versions not found' }, { status: 404 })
  }

  const [dataA, dataB] = await Promise.all([
    loadVersionData(id),
    loadVersionData(otherId),
  ])

  const diffs = VERSION_TABLES.map(tableName =>
    diffTable(tableName, dataA[tableName] || [], dataB[tableName] || [])
  ).filter(d => d.hasChanges)

  return NextResponse.json({
    versionA: vA.rows[0],
    versionB: vB.rows[0],
    diffs,
    total_changes: diffs.reduce((sum, d) => sum + d.rows.filter(r => r.status !== 'unchanged').length, 0),
  })
}
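The response shape pins down most of what diffTable from '@/lib/version-diff' must return: a per-table diff with a hasChanges flag and per-row statuses. A type sketch inferred from this usage; status values other than 'unchanged' and the before/after fields are assumptions:

// Hypothetical signature of diffTable, inferred from how the route consumes it.
export type RowStatus = 'added' | 'removed' | 'changed' | 'unchanged' // values beyond 'unchanged' are assumed

export interface TableDiff {
  tableName: string
  hasChanges: boolean
  rows: Array<{ status: RowStatus; before?: unknown; after?: unknown }>
}

export declare function diffTable(
  tableName: string,
  a: unknown[],
  b: unknown[],
): TableDiff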
Some files were not shown because too many files have changed in this diff.