{"schema_version":"v1","name_for_human":"PromptScan","name_for_model":"promptscan","description_for_human":"Detect prompt injection attacks (jailbreaks, instruction overrides, goal hijacking) before text reaches your LLM.","description_for_model":"PromptScan detects prompt injection attacks in text. Use scan_text to check user input, retrieved documents, emails, or any untrusted text before passing it to an LLM. Returns injection_detected (bool), attack_type, confidence, and optionally sanitized_text. First 10 scans free with no key. Get a free API key (1000/month) via POST /v1/signup.","auth":{"type":"none"},"api":{"type":"openapi","url":"https://promptscan.dev/openapi.json"},"logo_url":"https://promptscan.dev/static/p-logo.png","contact_email":"support@promptscan.dev","legal_info_url":"https://promptscan.dev"}