847 lines
31 KiB
Python
Executable File
847 lines
31 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
import argparse
|
|
import dataclasses
|
|
import datetime as dt
|
|
import hashlib
|
|
import json
|
|
import pathlib
|
|
import re
|
|
import subprocess
|
|
import sys
|
|
from typing import Any, Dict, List, Optional, Tuple
|
|
|
|
|
|
# Repository root: this script resolves its own location and goes two levels
# up, so it must live one directory below the repo root (e.g. <root>/scripts/).
ROOT = pathlib.Path(__file__).resolve().parents[1]
# Checked-in baseline document this checker emits (--emit-baseline) and reads.
BASELINE_PATH = ROOT / "docs" / "voice-recording-parity-baseline.json"
|
|
|
|
# Numeric constants in the Rosetta sources that must stay in parity with
# Telegram's voice-recording behavior.  Fields per spec:
#   id / severity      -- stable finding identifier and gate level (P0/P1).
#   file / pattern     -- Rosetta source scanned with re.search; capture
#                         group 1 is the actual value.
#   expected           -- value group 1 must equal (compared as strings).
#   telegram_file / telegram_pattern -- upstream evidence: the referenced
#                         Telegram source must still match this pattern.
#   raw_match          -- when True, `pattern` has no capture group and is
#                         checked for mere presence (formula-shaped checks).
CONSTANT_SPECS = [
    {
        "id": "hold_threshold",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let holdThreshold: TimeInterval = ([0-9.]+)",
        "expected": "0.19",
        "telegram_file": "Telegram-iOS/submodules/TelegramUI/Components/ChatTextInputMediaRecordingButton/Sources/ChatTextInputMediaRecordingButton.swift",
        "telegram_pattern": r"timeout: 0\.19",
    },
    {
        "id": "cancel_distance_threshold",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let cancelDistanceThreshold: CGFloat = (-?[0-9.]+)",
        "expected": "-150",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"distanceX < -150\.0f",
    },
    {
        "id": "cancel_haptic_threshold",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let cancelHapticThreshold: CGFloat = (-?[0-9.]+)",
        "expected": "-100",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"distanceX < -100\.0",
    },
    {
        "id": "lock_distance_threshold",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let lockDistanceThreshold: CGFloat = (-?[0-9.]+)",
        "expected": "-110",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"distanceY < -110\.0f",
    },
    {
        "id": "lock_haptic_threshold",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let lockHapticThreshold: CGFloat = (-?[0-9.]+)",
        "expected": "-60",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"distanceY < -60\.0",
    },
    {
        "id": "velocity_gate",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let velocityGate: CGFloat = (-?[0-9.]+)",
        "expected": "-400",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"velocity\.x < -400\.0f|velocity\.y < -400\.0f",
    },
    {
        "id": "lockness_divisor",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let locknessDivisor: CGFloat = ([0-9.]+)",
        "expected": "105",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"fabs\(_targetTranslation\) / 105\.0f",
    },
    {
        "id": "drag_normalize_divisor",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let dragNormalizeDivisor: CGFloat = ([0-9.]+)",
        "expected": "300",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"\(-distanceX\) / 300\.0f|\(-distanceY\) / 300\.0f",
    },
    {
        # NOTE(review): the "telegram" evidence here points at a Rosetta file,
        # not the Telegram tree -- presumably intentional (the threshold is
        # enforced via the shared parity-math helper); confirm.
        "id": "cancel_transform_threshold",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let cancelTransformThreshold: CGFloat = ([0-9.]+)",
        "expected": "8",
        "telegram_file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingPanel.swift",
        "telegram_pattern": r"VoiceRecordingParityMath\.shouldApplyCancelTransform\(translation\)",
    },
    {
        "id": "send_accessibility_hit_size",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"static let sendAccessibilityHitSize: CGFloat = ([0-9.]+)",
        "expected": "120",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"VoiceOver\.Recording\.StopAndPreview",
    },
    {
        # raw_match spec: the whole formula must be present verbatim (as a
        # regex); "expected" mirrors the pattern and is informational here.
        "id": "min_trim_formula",
        "severity": "P0",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingParityMath.swift",
        "pattern": r"max\(1\.0, 56\.0 \* duration / max\(waveformWidth, 1\)\)",
        "expected": r"max\(1\.0, 56\.0 \* duration / max\(waveformWidth, 1\)\)",
        "telegram_file": "Telegram-iOS/submodules/TelegramUI/Components/Chat/ChatRecordingPreviewInputPanelNode/Sources/ChatRecordingPreviewInputPanelNode.swift",
        "telegram_pattern": r"max\(1\.0, 56\.0 \* audio\.duration / waveformBackgroundFrame\.size\.width\)",
        "raw_match": True,
    },
]
|
|
|
|
# Recording flow state-machine parity.  `expected_states` is an ordered list
# compared against the `case` identifiers parsed from the state enum file;
# each entry in `required_transitions` is a literal call that must appear in
# the composer so the corresponding state is actually entered somewhere.
FLOW_SPEC = {
    "severity": "P0",
    "state_file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingFlowTypes.swift",
    "expected_states": [
        "idle",
        "armed",
        "recordingUnlocked",
        "recordingLocked",
        "waitingForPreview",
        "draftPreview",
    ],
    "required_transitions": [
        {
            "id": "armed",
            "severity": "P1",
            "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
            "snippet": "setRecordingFlowState(.armed)",
        },
        {
            "id": "recordingUnlocked",
            "severity": "P1",
            "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
            "snippet": "setRecordingFlowState(.recordingUnlocked)",
        },
        {
            "id": "recordingLocked",
            "severity": "P1",
            "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
            "snippet": "setRecordingFlowState(.recordingLocked)",
        },
        {
            "id": "waitingForPreview",
            "severity": "P1",
            "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
            "snippet": "setRecordingFlowState(.waitingForPreview)",
        },
        {
            "id": "draftPreview",
            "severity": "P1",
            "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
            "snippet": "setRecordingFlowState(.draftPreview)",
        },
    ],
}
|
|
|
|
# Accessibility parity: each snippet is a literal label/identifier assignment
# that must appear verbatim (substring match) in the named Rosetta file.
ACCESSIBILITY_SPECS = [
    {
        "id": "mic_button",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
        "snippet": "micButton.accessibilityLabel = \"Voice message\"",
    },
    {
        "id": "stop_area",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/ComposerView.swift",
        "snippet": "button.accessibilityIdentifier = \"voice.recording.stopArea\"",
    },
    {
        "id": "lock_stop",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingLockView.swift",
        "snippet": "stopButton.accessibilityLabel = \"Stop recording\"",
    },
    {
        "id": "slide_to_cancel",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingPanel.swift",
        "snippet": "cancelContainer.accessibilityLabel = \"Slide left to cancel recording\"",
    },
    {
        "id": "preview_send",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "sendButton.accessibilityLabel = \"Send recording\"",
    },
    {
        "id": "preview_record_more",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "recordMoreButton.accessibilityLabel = \"Record more\"",
    },
    {
        "id": "preview_delete",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "deleteButton.accessibilityIdentifier = \"voice.preview.delete\"",
    },
    {
        "id": "preview_play_pause",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "playButton.accessibilityIdentifier = \"voice.preview.playPause\"",
    },
    {
        "id": "preview_waveform",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "waveformContainer.accessibilityIdentifier = \"voice.preview.waveform\"",
    },
]
|
|
|
|
# Geometry parity: same field shape as CONSTANT_SPECS (regex capture group 1
# vs. expected value).  Entries without telegram_file/telegram_pattern are
# Rosetta-internal layout constants with no upstream evidence check.
GEOMETRY_SPECS = [
    {
        "id": "overlay_inner_diameter",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingOverlay.swift",
        "pattern": r"private let innerDiameter: CGFloat = ([0-9.]+)",
        "expected": "110",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"innerCircleRadius = 110\.0f",
    },
    {
        # Upstream expresses this as inner + 50 (110 + 50 = 160).
        "id": "overlay_outer_diameter",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingOverlay.swift",
        "pattern": r"private let outerDiameter: CGFloat = ([0-9.]+)",
        "expected": "160",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"outerCircleRadius = innerCircleRadius \+ 50\.0f",
    },
    {
        "id": "panel_dot_x",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingPanel.swift",
        "pattern": r"private let dotX: CGFloat = ([0-9.]+)",
        "expected": "5",
    },
    {
        "id": "panel_timer_x",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingPanel.swift",
        "pattern": r"private let timerX: CGFloat = ([0-9.]+)",
        "expected": "40",
    },
    {
        "id": "panel_dot_size",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingPanel.swift",
        "pattern": r"private let dotSize: CGFloat = ([0-9.]+)",
        "expected": "10",
    },
    {
        "id": "lock_panel_width",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingLockView.swift",
        "pattern": r"private let panelWidth: CGFloat = ([0-9.]+)",
        "expected": "40",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"CGRectMake\(0\.0f, 0\.0f, 40\.0f, 72\.0f\)",
    },
    {
        "id": "lock_panel_full_height",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingLockView.swift",
        "pattern": r"private let panelFullHeight: CGFloat = ([0-9.]+)",
        "expected": "72",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"CGRectMake\(0\.0f, 0\.0f, 40\.0f, 72\.0f\)",
    },
    {
        "id": "lock_panel_locked_height",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingLockView.swift",
        "pattern": r"private let panelLockedHeight: CGFloat = ([0-9.]+)",
        "expected": "40",
    },
    {
        "id": "lock_vertical_offset",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingLockView.swift",
        "pattern": r"private let verticalOffset: CGFloat = ([0-9.]+)",
        "expected": "122",
        "telegram_file": "Telegram-iOS/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m",
        "telegram_pattern": r"centerPoint\.y - 122\.0f",
    },
]
|
|
|
|
# Animation parity: literal call sites that must appear verbatim (substring
# match) in the named files -- Lottie frame ranges for the play/pause morph
# and UIKit animation timing for the overlay spring and lock-stop delay.
ANIMATION_SPECS = [
    {
        "id": "preview_play_to_pause_frames",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "playPauseAnimationView.play(fromFrame: 0, toFrame: 41, loopMode: .playOnce)",
    },
    {
        "id": "preview_pause_to_play_frames",
        "severity": "P1",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingPreviewPanel.swift",
        "snippet": "playPauseAnimationView.play(fromFrame: 41, toFrame: 83, loopMode: .playOnce)",
    },
    {
        "id": "overlay_spring_timing",
        "severity": "P2",
        "file": "Rosetta/Features/Chats/ChatDetail/VoiceRecordingOverlay.swift",
        "snippet": "UIView.animate(withDuration: 0.5, delay: 0, usingSpringWithDamping: 0.55",
    },
    {
        "id": "lock_stop_delay",
        "severity": "P2",
        "file": "Rosetta/Features/Chats/ChatDetail/RecordingLockView.swift",
        "snippet": "UIView.animate(withDuration: 0.25, delay: 0.56, options: [.curveEaseOut])",
    },
]
|
|
|
|
|
|
@dataclasses.dataclass
class Finding:
    """A single parity deviation detected by the checker.

    Plain value object; `to_dict()` produces the JSON-report form, where the
    `item_id` field is emitted under the key "id".
    """

    severity: str  # gate level: "P0".."P3"
    kind: str  # machine-readable category, e.g. "value_mismatch"
    layer: str  # baseline section that produced it (constants, flow, ...)
    item_id: str  # spec id; serialized as "id"
    expected: str
    actual: str
    delta: str  # short human-readable difference summary
    evidence: str  # repo-relative path backing the finding

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the JSON report, preserving the report key order."""
        keys = ("severity", "kind", "layer", "id", "expected", "actual", "delta", "evidence")
        values = (
            self.severity,
            self.kind,
            self.layer,
            self.item_id,
            self.expected,
            self.actual,
            self.delta,
            self.evidence,
        )
        return dict(zip(keys, values))
|
|
|
|
|
|
def read_text(path: pathlib.Path) -> str:
    """Return the full contents of *path* decoded as UTF-8."""
    with path.open(encoding="utf-8") as handle:
        return handle.read()
|
|
|
|
|
|
def sha256_file(path: pathlib.Path) -> str:
    """Return the hex SHA-256 digest of *path*, streamed in 64 KiB chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as handle:
        # iter() with a sentinel yields chunks until read() returns b"".
        for chunk in iter(lambda: handle.read(64 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
|
|
|
|
|
def parse_image_size(path: pathlib.Path) -> Optional[Tuple[int, int]]:
    """Return (width, height) in pixels for a raster image, else None.

    Shells out to macOS `sips`.  Returns None for non-PNG/JPEG suffixes, when
    sips is not installed, when it exits non-zero, or when either dimension is
    missing from its output.
    """
    if path.suffix.lower() not in {".png", ".jpg", ".jpeg"}:
        return None
    command = ["/usr/bin/sips", "-g", "pixelWidth", "-g", "pixelHeight", str(path)]
    try:
        result = subprocess.run(command, check=False, capture_output=True, text=True)
    except FileNotFoundError:
        # sips only exists on macOS; treat its absence as "size unknown".
        return None
    if result.returncode != 0:
        return None
    dims: Dict[str, int] = {}
    for raw_line in result.stdout.splitlines():
        for key in ("pixelWidth", "pixelHeight"):
            if f"{key}:" in raw_line:
                dims[key] = int(raw_line.split(":", 1)[1].strip())
    if "pixelWidth" not in dims or "pixelHeight" not in dims:
        return None
    return (dims["pixelWidth"], dims["pixelHeight"])
|
|
|
|
|
|
def parse_lottie_meta(path: pathlib.Path) -> Dict[str, Any]:
    """Read the Lottie header fields we pin (frame rate, in/out points, size).

    Missing fields come back as None via dict.get.  Raises on unreadable or
    invalid JSON, like the original json.loads call would.
    """
    document = json.loads(path.read_text(encoding="utf-8"))
    # Map our report keys to the short Lottie/bodymovin field names.
    field_map = {"fps": "fr", "ip": "ip", "op": "op", "width": "w", "height": "h"}
    return {name: document.get(source) for name, source in field_map.items()}
|
|
|
|
|
|
def discover_asset_manifest() -> Dict[str, Any]:
    """Build the asset manifest from the working tree.

    Hashes (and, when possible, sizes) every file in VoiceRecording*.imageset
    bundles, and hashes + reads the header of every voice_*.json Lottie file.
    Contents.json catalog descriptors are skipped.
    """
    imageset_entries: List[Dict[str, Any]] = []
    bundles_root = ROOT / "Rosetta" / "Assets.xcassets"
    for bundle in sorted(bundles_root.glob("VoiceRecording*.imageset")):
        file_entries = []
        for member in sorted(bundle.iterdir()):
            if member.name == "Contents.json":
                continue
            entry: Dict[str, Any] = {"name": member.name, "sha256": sha256_file(member)}
            dims = parse_image_size(member)
            # Size is best-effort: only recorded when sips could measure it.
            if dims is not None:
                entry["size"] = {"width": dims[0], "height": dims[1]}
            file_entries.append(entry)
        imageset_entries.append(
            {
                "id": bundle.name.replace(".imageset", ""),
                "severity": "P1",
                "path": str(bundle.relative_to(ROOT)),
                "files": file_entries,
            }
        )

    lottie_entries: List[Dict[str, Any]] = []
    animations_root = ROOT / "Rosetta" / "Resources" / "Lottie"
    for animation in sorted(animations_root.glob("voice_*.json")):
        lottie_entries.append(
            {
                "id": animation.stem,
                "severity": "P1",
                "path": str(animation.relative_to(ROOT)),
                "sha256": sha256_file(animation),
                "meta": parse_lottie_meta(animation),
            }
        )

    return {"imagesets": imageset_entries, "lottie": lottie_entries}
|
|
|
|
|
|
def parse_flow_states(path: pathlib.Path) -> List[str]:
    """Return the `case` identifiers found in the Swift file, in file order.

    NOTE(review): the regex matches "case" anywhere in the text (e.g. inside a
    switch, or as the tail of a word like "lowercase x"), not only enum
    declarations -- acceptable while the state file contains just the enum.
    """
    source = path.read_text(encoding="utf-8")
    return re.findall(r"case\s+([A-Za-z_][A-Za-z0-9_]*)", source)
|
|
|
|
|
|
def build_baseline() -> Dict[str, Any]:
    """Assemble the versioned parity-baseline document.

    Combines the static spec tables with two pieces of current working-tree
    state: the flow enum's case list and the discovered asset manifest.
    """
    flow = dict(FLOW_SPEC)
    flow["actual_states"] = parse_flow_states(ROOT / FLOW_SPEC["state_file"])
    return {
        "version": 1,
        "generated_at": dt.datetime.now(dt.timezone.utc).isoformat(),
        "constants": CONSTANT_SPECS,
        "flow": flow,
        "accessibility": ACCESSIBILITY_SPECS,
        "geometry": GEOMETRY_SPECS,
        "animations": ANIMATION_SPECS,
        "assets": discover_asset_manifest(),
    }
|
|
|
|
|
|
def parse_actual_constant(text: str, pattern: str, raw_match: bool) -> Optional[str]:
    """Extract the "actual" value for a spec from *text*.

    In raw mode the pattern has no capture group: a hit returns the pattern
    string itself (so it compares equal to the spec's `expected`).  Otherwise
    the value is capture group 1.  Returns None when the pattern is absent.
    """
    match = re.search(pattern, text)
    if match is None:
        return None
    return pattern if raw_match else match.group(1)
|
|
|
|
|
|
def _check_value_specs(findings: List[Finding], specs: List[Dict[str, Any]], layer: str) -> None:
    """Shared checker for the `constants` and `geometry` layers.

    For each spec: extract the actual value from the Rosetta source via the
    spec's regex and compare it (as strings) to `expected`; then, when the
    spec carries telegram_file/telegram_pattern, confirm that evidence still
    matches in the referenced file.  The original code duplicated this loop
    verbatim for both layers; it is factored out here.
    """
    for spec in specs:
        source_text = read_text(ROOT / spec["file"])
        actual = parse_actual_constant(source_text, spec["pattern"], bool(spec.get("raw_match", False)))
        expected = spec["expected"]
        if actual is None:
            findings.append(
                Finding(
                    severity=spec["severity"],
                    kind="missing_pattern",
                    layer=layer,
                    item_id=spec["id"],
                    expected=str(expected),
                    actual="missing",
                    delta="pattern_not_found",
                    evidence=spec["file"],
                )
            )
        elif str(actual) != str(expected):
            findings.append(
                Finding(
                    severity=spec["severity"],
                    kind="value_mismatch",
                    layer=layer,
                    item_id=spec["id"],
                    expected=str(expected),
                    actual=str(actual),
                    delta=f"{actual} != {expected}",
                    evidence=spec["file"],
                )
            )

        telegram_file_raw = spec.get("telegram_file")
        telegram_pattern = spec.get("telegram_pattern")
        if not (telegram_file_raw and telegram_pattern):
            continue
        telegram_text = read_text(ROOT / telegram_file_raw)
        if re.search(telegram_pattern, telegram_text):
            continue
        findings.append(
            Finding(
                severity=spec["severity"],
                kind="telegram_reference_missing",
                layer=layer,
                item_id=spec["id"],
                expected=telegram_pattern,
                actual="missing",
                delta="telegram_evidence_not_found",
                evidence=telegram_file_raw,
            )
        )


def _check_snippet_specs(findings: List[Finding], specs: List[Dict[str, Any]], layer: str, kind: str) -> None:
    """Require each spec's literal snippet to appear (substring) in its file.

    Used for both the accessibility and animation layers, which differ only
    in the finding `kind` they report.
    """
    for spec in specs:
        if spec["snippet"] not in read_text(ROOT / spec["file"]):
            findings.append(
                Finding(
                    severity=spec["severity"],
                    kind=kind,
                    layer=layer,
                    item_id=spec["id"],
                    expected=spec["snippet"],
                    actual="missing",
                    delta="snippet_not_found",
                    evidence=spec["file"],
                )
            )


def _check_flow(findings: List[Finding], flow: Dict[str, Any]) -> None:
    """Compare the state enum on disk with the baseline's ordered state list,
    then require each transition snippet to be present in its file."""
    actual_states = parse_flow_states(ROOT / flow["state_file"])
    expected_states = flow["expected_states"]
    if actual_states != expected_states:
        findings.append(
            Finding(
                severity=flow["severity"],
                kind="state_machine_mismatch",
                layer="flow",
                item_id="flow_states",
                expected=",".join(expected_states),
                actual=",".join(actual_states),
                delta="state_list_diff",
                evidence=flow["state_file"],
            )
        )

    for transition in flow.get("required_transitions", []):
        transition_text = read_text(ROOT / transition["file"])
        if transition["snippet"] not in transition_text:
            findings.append(
                Finding(
                    severity=transition["severity"],
                    kind="transition_missing",
                    layer="flow",
                    item_id=transition["id"],
                    expected=transition["snippet"],
                    actual="missing",
                    delta="snippet_not_found",
                    evidence=transition["file"],
                )
            )


def _check_imagesets(findings: List[Finding], specs: List[Dict[str, Any]]) -> None:
    """Verify imageset bundles: presence, then per-file sha256 and (when the
    baseline recorded one) pixel size.  A hash mismatch does not skip the
    size check -- both findings can be reported for the same file."""
    for spec in specs:
        imageset_path = ROOT / spec["path"]
        if not imageset_path.exists():
            findings.append(
                Finding(
                    severity=spec["severity"],
                    kind="asset_missing",
                    layer="assets",
                    item_id=spec["id"],
                    expected=spec["path"],
                    actual="missing",
                    delta="imageset_missing",
                    evidence=spec["path"],
                )
            )
            continue
        for file_spec in spec.get("files", []):
            file_path = imageset_path / file_spec["name"]
            if not file_path.exists():
                findings.append(
                    Finding(
                        severity=spec["severity"],
                        kind="asset_file_missing",
                        layer="assets",
                        item_id=f"{spec['id']}/{file_spec['name']}",
                        expected=file_spec["name"],
                        actual="missing",
                        delta="file_missing",
                        evidence=str(imageset_path.relative_to(ROOT)),
                    )
                )
                continue
            actual_sha = sha256_file(file_path)
            if actual_sha != file_spec["sha256"]:
                findings.append(
                    Finding(
                        severity=spec["severity"],
                        kind="asset_hash_mismatch",
                        layer="assets",
                        item_id=f"{spec['id']}/{file_spec['name']}",
                        expected=file_spec["sha256"],
                        actual=actual_sha,
                        delta="sha256_mismatch",
                        evidence=str(file_path.relative_to(ROOT)),
                    )
                )
            expected_size = file_spec.get("size")
            if expected_size is None:
                continue
            actual_size = parse_image_size(file_path)
            if actual_size is None:
                # sips unavailable or unreadable image: report "unknown"
                # rather than silently passing the size check.
                findings.append(
                    Finding(
                        severity=spec["severity"],
                        kind="asset_size_missing",
                        layer="assets",
                        item_id=f"{spec['id']}/{file_spec['name']}",
                        expected=f"{expected_size['width']}x{expected_size['height']}",
                        actual="unknown",
                        delta="size_unavailable",
                        evidence=str(file_path.relative_to(ROOT)),
                    )
                )
            elif actual_size != (expected_size["width"], expected_size["height"]):
                findings.append(
                    Finding(
                        severity=spec["severity"],
                        kind="asset_size_mismatch",
                        layer="assets",
                        item_id=f"{spec['id']}/{file_spec['name']}",
                        expected=f"{expected_size['width']}x{expected_size['height']}",
                        actual=f"{actual_size[0]}x{actual_size[1]}",
                        delta="size_mismatch",
                        evidence=str(file_path.relative_to(ROOT)),
                    )
                )


def _check_lottie(findings: List[Finding], specs: List[Dict[str, Any]]) -> None:
    """Verify Lottie files: presence, sha256, then each pinned header field.
    As with imagesets, a hash mismatch does not skip the metadata checks."""
    for spec in specs:
        lottie_path = ROOT / spec["path"]
        if not lottie_path.exists():
            findings.append(
                Finding(
                    severity=spec["severity"],
                    kind="lottie_missing",
                    layer="assets",
                    item_id=spec["id"],
                    expected=spec["path"],
                    actual="missing",
                    delta="lottie_missing",
                    evidence=spec["path"],
                )
            )
            continue
        actual_sha = sha256_file(lottie_path)
        if actual_sha != spec["sha256"]:
            findings.append(
                Finding(
                    severity=spec["severity"],
                    kind="lottie_hash_mismatch",
                    layer="assets",
                    item_id=spec["id"],
                    expected=spec["sha256"],
                    actual=actual_sha,
                    delta="sha256_mismatch",
                    evidence=spec["path"],
                )
            )
        actual_meta = parse_lottie_meta(lottie_path)
        expected_meta = spec.get("meta", {})
        for key in ["fps", "ip", "op", "width", "height"]:
            if expected_meta.get(key) != actual_meta.get(key):
                findings.append(
                    Finding(
                        severity=spec["severity"],
                        kind="lottie_meta_mismatch",
                        layer="assets",
                        item_id=f"{spec['id']}.{key}",
                        expected=str(expected_meta.get(key)),
                        actual=str(actual_meta.get(key)),
                        delta=f"{key}_mismatch",
                        evidence=spec["path"],
                    )
                )


def run_checker(baseline: Dict[str, Any]) -> Dict[str, Any]:
    """Run every parity layer against the working tree and build the report.

    Args:
        baseline: parsed baseline JSON (see build_baseline()).  The "flow"
            section is mandatory (KeyError if absent); all other sections
            default to empty.

    Returns:
        Dict with a UTC "generated_at" timestamp, a "summary" (total and
        per-severity counts), and the flat "findings" list, ordered by layer:
        constants, geometry, flow, accessibility, animations, assets.
    """
    findings: List[Finding] = []

    _check_value_specs(findings, baseline.get("constants", []), "constants")
    _check_value_specs(findings, baseline.get("geometry", []), "geometry")
    _check_flow(findings, baseline["flow"])
    _check_snippet_specs(findings, baseline.get("accessibility", []), "accessibility", "accessibility_missing")
    _check_snippet_specs(findings, baseline.get("animations", []), "animations", "animation_snippet_missing")

    assets = baseline.get("assets", {})
    _check_imagesets(findings, assets.get("imagesets", []))
    _check_lottie(findings, assets.get("lottie", []))

    counts = {"P0": 0, "P1": 0, "P2": 0, "P3": 0}
    for finding in findings:
        counts[finding.severity] = counts.get(finding.severity, 0) + 1

    return {
        "generated_at": dt.datetime.now(dt.timezone.utc).isoformat(),
        "summary": {
            "total": len(findings),
            "by_severity": counts,
        },
        "findings": [finding.to_dict() for finding in findings],
    }
|
|
|
|
|
|
def severity_rank(severity: str) -> int:
    """Map "P0".."P3" to 0..3 for threshold comparison; unknown labels
    sort last with rank 99."""
    return {"P0": 0, "P1": 1, "P2": 2, "P3": 3}.get(severity, 99)
|
|
|
|
|
|
def main() -> int:
    """CLI entry point for the parity checker.

    Modes:
      --emit-baseline: regenerate the baseline JSON at --baseline and exit 0.
      default: load the baseline, run the checker, print a report (JSON with
      --report-json/--pretty, one summary line otherwise).

    Exit codes: 0 clean (or findings below the gate), 1 when any finding is at
    or above the --fail-on severity, 2 when the baseline file is missing.
    """
    parser = argparse.ArgumentParser(description="Voice recording parity checker")
    parser.add_argument("--baseline", default=str(BASELINE_PATH))
    parser.add_argument("--emit-baseline", action="store_true")
    parser.add_argument("--report-json", action="store_true")
    parser.add_argument("--pretty", action="store_true")
    parser.add_argument("--fail-on", default="P1", choices=["P0", "P1", "P2", "P3", "none"])
    args = parser.parse_args()

    baseline_path = pathlib.Path(args.baseline)

    if args.emit_baseline:
        baseline = build_baseline()
        baseline_path.write_text(json.dumps(baseline, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
        if args.report_json:
            # Fix: --baseline may point outside the repository; relative_to()
            # raises ValueError there (crashing after the file was already
            # written), so fall back to reporting the path as given.
            try:
                written = str(baseline_path.relative_to(ROOT))
            except ValueError:
                written = str(baseline_path)
            print(json.dumps({"baseline_written": written}, ensure_ascii=False))
        else:
            print(f"Baseline written: {baseline_path}")
        return 0

    if not baseline_path.exists():
        print(f"Baseline not found: {baseline_path}", file=sys.stderr)
        return 2

    baseline = json.loads(baseline_path.read_text(encoding="utf-8"))
    report = run_checker(baseline)

    if args.report_json or args.pretty:
        # --pretty implies JSON output; indent only in the pretty case.
        print(json.dumps(report, ensure_ascii=False, indent=2 if args.pretty else None))
    else:
        total = report["summary"]["total"]
        counts = report["summary"]["by_severity"]
        print(
            f"findings={total} P0={counts.get('P0', 0)} P1={counts.get('P1', 0)} "
            f"P2={counts.get('P2', 0)} P3={counts.get('P3', 0)}"
        )

    if args.fail_on != "none":
        threshold = severity_rank(args.fail_on)
        if any(severity_rank(finding["severity"]) <= threshold for finding in report["findings"]):
            return 1

    return 0
|
|
|
|
|
|
if __name__ == "__main__":
    # SystemExit carries main()'s return value as the process exit status.
    raise SystemExit(main())
|