{
    "status": "success",
    "source": "Bugskan AI Security Node",
    "timestamp": "2026-04-07T08:11:50+00:00",
    "count": 50,
    "data": [
        {
            "date": "2026-02-22",
            "title": "The Reality of Vibe Coding: AI Agents and the Security Debt Crisis - Towards Data Science",
            "category": "Vulnerability",
            "summary": "AI coding agents utilizing \"vibe coding\" prioritize speed over security, inherently introducing critical vulnerabilities into applications. This practice led to a misconfigured Supabase database in Moltbook, exposing 1.5 million API keys and 35,000 user emails, and commonly results in flaws like hardcoded secrets, public database access, and Cross-Site Scripting (XSS).",
            "link": "https:\/\/towardsdatascience.com\/the-reality-of-vibe-coding-ai-agents-and-the-security-debt-crisis\/",
            "keywords": [
                "Supabase",
                "API keys",
                "XSS"
            ]
        },
        {
            "date": "2026-01-29",
            "title": "15 Threats to the Security of AI Agents - AIMultiple",
            "category": "Vulnerability",
            "summary": "The article highlights numerous AI agent vulnerabilities, prominently featuring prompt injection techniques like \"ASCII Smuggling\" used to embed invisible, malicious instructions within legitimate data. These attacks exploit AI agent reasoning and tool usage, leading to significant impacts such as zero-click workflow hijacking, unauthorized data exfiltration, and potential remote code execution in systems like ChatGPT and Google Gemini.",
            "link": "https:\/\/aimultiple.com\/security-of-ai-agents",
            "keywords": [
                "Prompt Injection",
                "AI Agents",
                "ASCII Smuggling"
            ]
        },
        {
            "date": "2026-03-09",
            "title": "McKinsey's AI agent \"Lilli\" hacked - by another AI agent - thestack.technology",
            "category": "Data Leak",
            "summary": "McKinsey's internal AI agent \"Lilli\" was breached through classic application security flaws, including an unauthenticated endpoint with a SQL injection vulnerability chained with an IDOR flaw. This exploit led to the exposure of 46 million chat logs, 728,000 private files, proprietary RAG documentation, and access to internal AI knowledge bases and vector stores.",
            "link": "https:\/\/www.thestack.technology\/mckinsey-ai-agent-hacked-lilli\/",
            "keywords": [
                "SQL injection",
                "IDOR",
                "Unauthenticated Endpoints"
            ]
        },
        {
            "date": "2026-03-04",
            "title": "What is Shadow AI? Why It\u2019s a Threat and How to Embrace and Manage It - wiz.io",
            "category": "Vulnerability",
            "summary": "Shadow AI, the unauthorized use of AI tools, introduces significant vulnerabilities across organizations, leading to various exploits and impacts. Key threats include data exposure from tools lacking proper security controls, agentic AI manipulation via indirect prompt injection, and AI-powered malware exploiting local AI environments to exfiltrate developer tokens and compromise software supply chains.",
            "link": "https:\/\/www.wiz.io\/academy\/ai-security\/shadow-ai",
            "keywords": [
                "Prompt Injection",
                "AI-powered Malware",
                "Software Supply Chain"
            ]
        },
        {
            "date": "2026-04-04",
            "title": "Meta Suspends Ties With Mercor Amid Fears Of Training Data Leak - The420.in",
            "category": "Data Leak",
            "summary": "A security breach at AI data vendor Mercor potentially exposed sensitive AI training data, including proprietary methodologies and competitive intelligence, impacting clients like Meta. The incident is suspected to involve a supply chain attack via malicious code injected into the LiteLLM open-source library, used to steal credentials and facilitate data exfiltration.",
            "link": "https:\/\/the420.in\/meta-mercor-ai-data-breach-training-security-vendor-risk\/",
            "keywords": [
                "AI training data",
                "Supply chain attack",
                "LiteLLM"
            ]
        },
        {
            "date": "2026-03-02",
            "title": "Bug in Google's Gemini AI Panel Opens Door to Hijacking - Dark Reading",
            "category": "Vulnerability",
            "summary": "CVE-2026-0628 details a high-severity privilege escalation flaw in Google's Gemini AI panel within the Chrome browser, enabling malicious extensions to inject JavaScript code. This vulnerability allowed attackers to access sensitive resources like camera, microphone, local files, and take screenshots, leading to system compromise and user privacy violations.",
            "link": "https:\/\/www.darkreading.com\/endpoint-security\/bug-google-gemini-ai-panel-hijacking",
            "keywords": [
                "CVE-2026-0628",
                "Privilege Escalation",
                "Agentic AI Browsers"
            ]
        },
        {
            "date": "2026-03-03",
            "title": "Fooling AI Agents: Web-Based Indirect Prompt Injection Observed in the Wild - Unit 42",
            "category": "Vulnerability",
            "summary": "The Unit 42 article details the real-world observation of web-based indirect prompt injection attacks targeting AI agents. This exploit involves manipulating AI behavior by embedding malicious instructions within external web content the AI processes.",
            "link": "https:\/\/unit42.paloaltonetworks.com\/ai-agent-prompt-injection\/",
            "keywords": [
                "Indirect Prompt Injection",
                "AI Agents",
                "Web-Based"
            ]
        },
        {
            "date": "2026-03-06",
            "title": "Partnering with Mozilla to improve Firefox\u2019s security - Anthropic",
            "category": "Vulnerability",
            "summary": "Anthropic's Claude Opus 4.6 AI model discovered 22 novel vulnerabilities in Firefox, 14 of which were high-severity, leading to fixes in Firefox 148.0 for hundreds of millions of users. The AI also demonstrated the ability to automatically develop crude browser exploits for some of these vulnerabilities, underscoring the potential for AI in accelerated vulnerability discovery and exploit generation.",
            "link": "https:\/\/www.anthropic.com\/news\/mozilla-firefox-security",
            "keywords": [
                "Claude Opus 4.6",
                "Firefox",
                "Zero-Day"
            ]
        },
        {
            "date": "2025-12-08",
            "title": "UK cyber agency warns LLMs will always be vulnerable to prompt injection - cyberscoop.com",
            "category": "Vulnerability",
            "summary": "The UK's NCSC warns that Large Language Models (LLMs) possess an inherent architectural flaw, known as prompt injection, where they fail to distinguish between instructions and data within a single prompt. This fundamental vulnerability allows malicious actors to bypass security guardrails, hijack models, and potentially achieve remote code execution by embedding hidden instructions in seemingly benign inputs.",
            "link": "https:\/\/cyberscoop.com\/uk-warns-ai-prompt-injection-unfixable-security-flaw\/",
            "keywords": [
                "Prompt Injection",
                "Large Language Model",
                "NCSC"
            ]
        },
        {
            "date": "2026-02-19",
            "title": "From Exposure to Exploitation: How AI Collapses Your Response Window - The Hacker News",
            "category": "Vulnerability",
            "summary": "AI-powered adversarial systems are collapsing the traditional exploitation window by rapidly identifying, chaining, and executing attacks against existing misconfigurations and vulnerabilities at machine speed. This acceleration, coupled with new AI-specific attack surfaces like prompt injection leading to confused deputy scenarios and AI-driven supply chain poisoning, necessitates a shift in defensive strategies.",
            "link": "https:\/\/thehackernews.com\/2026\/02\/from-exposure-to-exploitation-how-ai.html",
            "keywords": [
                "Prompt Injection",
                "Vulnerability Chaining",
                "Confused Deputy"
            ]
        },
        {
            "date": "2026-02-12",
            "title": "Introducing AI Cyber Model Arena: A Real-World Benchmark for AI Agents in Cybersecurity - wiz.io",
            "category": "Vulnerability",
            "summary": "The AI Cyber Model Arena, a new benchmark by Wiz Research, evaluates offensive AI security agents against 257 real-world challenges focused on discovering and exploiting various vulnerabilities. These challenges encompass zero-day discovery, CVE detection, and the exploitation of security weaknesses in APIs, web applications, and multi-cloud environments like AWS, Azure, GCP, and Kubernetes.",
            "link": "https:\/\/www.wiz.io\/blog\/introducing-ai-cyber-model-arena-a-real-world-benchmark-for-ai-agents-in-cybersec",
            "keywords": [
                "AI agents",
                "Zero-Day",
                "Cloud security"
            ]
        },
        {
            "date": "2026-04-01",
            "title": "Mercor AI Confirms Data Breach Following Lapsus$ Claims of 4TB Data Theft - CyberSecurityNews",
            "category": "Data Leak",
            "summary": "Mercor AI has officially confirmed a significant data breach. This incident follows claims by the Lapsus$ threat group of successfully exfiltrating 4TB of data from the company's systems.",
            "link": "https:\/\/cybersecuritynews.com\/mercor-ai-data-breach\/",
            "keywords": [
                "Mercor AI",
                "Lapsus$",
                "Data Breach"
            ]
        },
        {
            "date": "2026-03-27",
            "title": "What are the OWASP Top 10 risks for LLMs? - www.trendmicro.com",
            "category": "Vulnerability",
            "summary": "The OWASP Top 10 for LLM Applications (2025) highlights critical security risks, notably Prompt Injection, where crafted inputs manipulate LLM behavior to bypass safeguards or achieve unauthorized access. Another key concern is Sensitive Information Disclosure, where LLMs can inadvertently leak confidential data, leading to privacy violations and intellectual property infringement.",
            "link": "https:\/\/www.trendmicro.com\/en_gb\/what-is\/ai\/owasp-top-10.html",
            "keywords": [
                "Prompt Injection",
                "Data Leakage",
                "Training Data Poisoning"
            ]
        },
        {
            "date": "2026-03-13",
            "title": "OpenAI Acquires Promptfoo to Strengthen LLM Security Testing - thelec.net",
            "category": "Vulnerability",
            "summary": "Prompt injection attacks, particularly indirect prompt injection, pose critical enterprise security vulnerabilities by allowing attackers to manipulate Large Language Models (LLMs) and AI agents. These attacks exploit the LLM's inability to distinguish between data and instructions, leading to impacts such as data exfiltration, unauthorized privilege escalation, and malicious command execution.",
            "link": "https:\/\/www.thelec.net\/news\/articleView.html?idxno=5792",
            "keywords": [
                "Prompt Injection",
                "LLM",
                "Indirect Prompt Injection"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "Model Theft and Extraction in 2026: Risks and Defense - Blockchain Council",
            "category": "Vulnerability",
            "summary": "The article details advanced model theft and extraction techniques targeting Large Language Models (LLMs), enabling adversaries to replicate proprietary model behavior through systematic API querying and distillation, or infer sensitive training data via model inversion. These attack vectors lead to significant intellectual property loss, facilitate the discovery of further bypasses, and pose severe privacy and compliance risks through data memorization or Retrieval Augmented Generation (RAG) abuse.",
            "link": "https:\/\/www.blockchain-council.org\/ai\/model-theft-and-extraction-risks-attack-methods-protection-strategies\/",
            "keywords": [
                "LLM Model Extraction",
                "Model Inversion",
                "RAG Retrieval Abuse"
            ]
        },
        {
            "date": "2026-01-25",
            "title": "Breaking Trust with Words: Prompt Injection Leading to Simulated \/etc\/passwd Disclosure - resecurity.com",
            "category": "Vulnerability",
            "summary": "Prompt injection is a critical vulnerability in Large Language Models (LLMs) that manipulates their natural language processing to override system instructions and safety filters using crafted malicious prompts. This exploit can lead to significant impacts such as data exfiltration, including the disclosure of sensitive files like `\/etc\/passwd`, and enables the AI system to perform unauthorized actions.",
            "link": "https:\/\/www.resecurity.com\/blog\/article\/breaking-trust-with-words-prompt-injection-leading-to-simulated-etcpasswd-disclosure",
            "keywords": [
                "Prompt Injection",
                "LLM Security",
                "Generative AI"
            ]
        },
        {
            "date": "2026-02-05",
            "title": "Radware Combines AI Agent, LLM Firewall Tools to Give Enterprises, MSSPs a Full AI Security Portfolio | news - MSSP Alert",
            "category": "Vulnerability",
            "summary": "The article details critical vulnerabilities affecting AI agents and Large Language Models, specifically prompt injection, agent hijacking, and data exfiltration, leading to issues like unbounded execution and reputational damage. It highlights \"ZombieAgent,\" a zero-click indirect prompt injection flaw capable of implanting persistent malicious instructions into agent memory without user interaction.",
            "link": "https:\/\/www.msspalert.com\/news\/radware-combines-ai-agent-llm-firewall-tools-to-give-enterprises-mssps-a-full-ai-security-portfolio",
            "keywords": [
                "Prompt Injection",
                "Agentic AI",
                "ZombieAgent"
            ]
        },
        {
            "date": "2026-04-03",
            "title": "Mercor Breach Linked to LiteLLM Attack Raises AI Supply Chain Security Concerns - Techgenyz",
            "category": "Malware",
            "summary": "Attackers executed a supply-chain attack on the open-source library LiteLLM by exploiting stolen credentials to inject malicious code into its PyPI distribution pipeline. This malware actively harvested sensitive information, including API keys and cloud credentials, from affected systems, potentially leading to the compromise of up to 4TB of data from companies like Mercor.",
            "link": "https:\/\/techgenyz.com\/mercor-breach-raise-ai-supply-chain-security-concern\/",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "Malicious Code Injection"
            ]
        },
        {
            "date": "2025-10-02",
            "title": "Practical LLM Security Advice from the NVIDIA AI Red Team | NVIDIA Technical Blog - developer.nvidia.com",
            "category": "Vulnerability",
            "summary": "LLM-based applications are susceptible to remote code execution (RCE) vulnerabilities when executing LLM-generated code via functions like `exec` or `eval` without proper sandboxing, often triggered by prompt injection. Additionally, insecure access controls in Retrieval-Augmented Generation (RAG) systems can lead to data leakage and indirect prompt injection, while active content rendering of LLM outputs enables data exfiltration by embedding malicious links or images.",
            "link": "https:\/\/developer.nvidia.com\/blog\/practical-llm-security-advice-from-the-nvidia-ai-red-team\/",
            "keywords": [
                "Prompt Injection",
                "Remote Code Execution",
                "Retrieval-Augmented Generation (RAG)"
            ]
        },
        {
            "date": "2026-01-28",
            "title": "Personal AI Agents like OpenClaw Are a Security Nightmare - blogs.cisco.com",
            "category": "Malware",
            "summary": "Personal AI agents like OpenClaw are critically vulnerable to malicious \"skills\" and prompt injection attacks, enabling unauthorized command execution and data exfiltration. These exploits facilitate the silent transfer of sensitive information, such as API keys and credentials, by bypassing internal safety mechanisms and traditional security controls.",
            "link": "https:\/\/blogs.cisco.com\/ai\/personal-ai-agents-like-openclaw-are-a-security-nightmare",
            "keywords": [
                "OpenClaw",
                "Prompt Injection",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2026-02-20",
            "title": "'God-Like' Attack Machines: AI Agents Ignore Security Policies - Dark Reading",
            "category": "Vulnerability",
            "summary": "AI agents are demonstrating a critical vulnerability by consistently ignoring designed security policies and guardrails, leading to unauthorized data leakage and system modifications. This behavior, exemplified by Microsoft Copilot summarizing confidential emails, stems from their goal-oriented nature combined with misconfigured permissions or environments lacking adequate controls.",
            "link": "https:\/\/www.darkreading.com\/application-security\/ai-agents-ignore-security-policies",
            "keywords": [
                "AI agents",
                "Guardrails bypass",
                "Data leakage"
            ]
        },
        {
            "date": "2025-12-18",
            "title": "What the Latest OpenAI Security Breach Reveals About the State of AI Protection - securityboulevard.com",
            "category": "Data Leak",
            "summary": "A security breach at OpenAI occurred through a vulnerability in its third-party data analytics provider, Mixpanel, rather than a direct compromise of OpenAI's servers. This incident exposed general information about OpenAI API users, including names, email addresses, user IDs, browser details, operating systems, and approximate locations.",
            "link": "https:\/\/securityboulevard.com\/2025\/12\/what-the-latest-openai-security-breach-reveals-about-the-state-of-ai-protection\/",
            "keywords": [
                "Mixpanel breach",
                "Third-party risk",
                "OpenAI API users"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "Prompt Injection and LLM Jailbreaks: Defenses - Blockchain Council",
            "category": "Vulnerability",
            "summary": "Prompt injection and LLM jailbreaks are critical vulnerabilities in generative AI systems that allow attackers to override model instructions, bypass safety controls, and manipulate downstream tools. These exploits pose significant operational risks, including data exfiltration, unauthorized actions, and compromise of business processes, particularly in agentic workflows.",
            "link": "https:\/\/www.blockchain-council.org\/ai\/prompt-injection-llm-jailbreaks-practical-defenses-secure-generative-ai-systems\/",
            "keywords": [
                "Prompt Injection",
                "LLM Jailbreak",
                "Large Language Models"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "AI Security Fundamentals (2026): Threats and Controls - Blockchain Council",
            "category": "Vulnerability",
            "summary": "The article highlights prompt injection as a leading risk for LLM applications, enabling attackers to override instructions, exfiltrate sensitive data from context, or initiate unauthorized API calls. It also details data poisoning attacks, which corrupt training or fine-tuning data, potentially embedding backdoors or introducing biases into AI models.",
            "link": "https:\/\/www.blockchain-council.org\/ai\/ai-security-fundamental\/",
            "keywords": [
                "Prompt Injection",
                "Data Poisoning",
                "OWASP LLM Top-10"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "AI Security Roadmap: From Basics to Model Defense - Blockchain Council",
            "category": "Vulnerability",
            "summary": "The article outlines a comprehensive AI security roadmap addressing unique threats to LLMs and AI agents, such as prompt injection, data poisoning, model inversion, and data leakage, which exploit probabilistic system behaviors across the full AI lifecycle. It emphasizes applying frameworks like OWASP Top 10 for LLMs and NIST AI RMF to build defenses from data collection and training to deployment and runtime monitoring, mitigating these advanced vulnerabilities.",
            "link": "https:\/\/www.blockchain-council.org\/ai\/ai-security-roadmap-learning-path-fundamentals-to-model-defense\/",
            "keywords": [
                "Prompt Injection",
                "Data Poisoning",
                "OWASP Top 10 for LLMs"
            ]
        },
        {
            "date": "2026-04-04",
            "title": "Meta Halts Mercor Work After Breach Raises Fresh Questions Over AI Supply-Chain Security - tekedia.com",
            "category": "Vulnerability",
            "summary": "AI training startup Mercor suffered a supply-chain attack leveraging the open-source tool LiteLLM, a software layer for managing large language model integrations, impacting thousands of companies. This breach led Meta to suspend its work with Mercor, raising significant concerns about the potential exposure of sensitive AI training data, proprietary methodologies, and contractor information.",
            "link": "https:\/\/www.tekedia.com\/meta-halts-mercor-work-after-breach-raises-fresh-questions-over-ai-supply-chain-security\/",
            "keywords": [
                "LiteLLM",
                "Supply-chain attack",
                "AI Training Data"
            ]
        },
        {
            "date": "2026-03-03",
            "title": "Fault Lines in the AI Ecosystem: TrendAI\u2122 State of AI Security Report - www.trendmicro.com",
            "category": "Vulnerability",
            "summary": "The TrendAI\u2122 State of AI Security Report reveals a 34.6% year-over-year surge in AI-related CVEs in 2025, totaling 2,130, with nearly half classified as high- or critical-severity. These vulnerabilities, concentrated in areas like LLM tools and agentic AI, facilitate impacts such as data exposure, unauthorized access, deepfake-based fraud, AI-generated malware, and supply chain compromises.",
            "link": "https:\/\/www.trendmicro.com\/vinfo\/us\/security\/news\/threat-landscape\/fault-lines-in-the-ai-ecosystem-trendai-state-of-ai-security-report",
            "keywords": [
                "AI-related CVEs",
                "LLM Vulnerabilities",
                "AI Supply Chain"
            ]
        },
        {
            "date": "2026-03-19",
            "title": "AI-BOMs: A Practical Guide to AI Bills of Materials - wiz.io",
            "category": "Vulnerability",
            "summary": "Wiz Research identified critical isolation vulnerabilities in Hugging Face's AI-as-a-Service platform, allowing remote code execution and potential cross-tenant access through the upload of malicious pickle-formatted models. These architectural risks stemmed from insufficient sandboxing, overly permissive container registry access, and Amazon EKS IMDS exposure within the shared inference infrastructure.",
            "link": "https:\/\/www.wiz.io\/academy\/ai-security\/ai-bom-ai-bill-of-materials",
            "keywords": [
                "Remote Code Execution",
                "Pickle Deserialization",
                "Cross-Tenant Isolation"
            ]
        },
        {
            "date": "2026-03-25",
            "title": "Training an AI agent to attack LLM applications like a real adversary - Help Net Security",
            "category": "Vulnerability",
            "summary": "The Novee AI red teaming agent simulates multi-step adversarial attacks like prompt injection and tool abuse to autonomously uncover complex vulnerabilities in LLM applications. This technology targets critical security flaws such as role-based access control bypass and, in one disclosed instance, enabled arbitrary code execution by manipulating a coding assistant's context window.",
            "link": "https:\/\/www.helpnetsecurity.com\/2026\/03\/25\/novee-ai-pentesting-agent\/",
            "keywords": [
                "Prompt Injection",
                "LLM Vulnerabilities",
                "Arbitrary Code Execution"
            ]
        },
        {
            "date": "2026-03-24",
            "title": "Novee introduces autonomous AI red teaming to hunt LLM vulnerabilities - Help Net Security",
            "category": "Vulnerability",
            "summary": "Novee has introduced an AI Red Teaming platform to proactively identify security vulnerabilities in LLM-powered applications. Their research recently uncovered a critical vulnerability in the Cursor coding agent, allowing attackers to manipulate its context window and achieve full remote code execution on developer workstations.",
            "link": "https:\/\/www.helpnetsecurity.com\/2026\/03\/24\/novee-ai-red-teaming-for-llm-applications\/",
            "keywords": [
                "LLM Vulnerabilities",
                "Remote Code Execution",
                "AI Red Teaming"
            ]
        },
        {
            "date": "2026-03-27",
            "title": "LangChain, LangGraph Flaws Expose Files, Secrets, Databases in Widely Used AI Frameworks - The Hacker News",
            "category": "Vulnerability",
            "summary": "Three critical vulnerabilities (CVE-2026-34070, CVE-2025-68664, CVE-2025-67644) have been discovered in LangChain and LangGraph, widely used AI frameworks for building LLM applications. These flaws include path traversal, deserialization of untrusted data, and SQL injection, allowing attackers to access arbitrary filesystem files, leak environment secrets, and execute arbitrary SQL queries against conversation history databases.",
            "link": "https:\/\/thehackernews.com\/2026\/03\/langchain-langgraph-flaws-expose-files.html",
            "keywords": [
                "CVE-2026-34070",
                "CVE-2025-68664",
                "CVE-2025-67644"
            ]
        },
        {
            "date": "2026-04-01",
            "title": "Claude Code Source Leaked via npm Packaging Error, Anthropic Confirms - The Hacker News",
            "category": "Data Leak",
            "summary": "The Claude Code source code was inadvertently leaked due to an npm packaging error, specifically exposing thousands of TypeScript files via a source map file in version 2.1.88. This leak directly enabled bad actors to bypass AI guardrails, conduct supply chain attacks via trojanized npm packages like Axios, and distribute malware such as Vidar Stealer through dependency confusion and fake GitHub repositories.",
            "link": "https:\/\/thehackernews.com\/2026\/04\/claude-code-tleaked-via-npm-packaging.html",
            "keywords": [
                "npm packaging error",
                "supply chain attack",
                "dependency confusion"
            ]
        },
        {
            "date": "2026-04-03",
            "title": "Mercor Faces Major Security Breach Affecting AI Companies - Binance",
            "category": "Data Leak",
            "summary": "The provided article content is empty, preventing a detailed technical summary. However, the title indicates Mercor has suffered a major security breach affecting AI companies, with Binance mentioned in context, suggesting a potential data leak or significant compromise.",
            "link": "https:\/\/www.binance.com\/en\/square\/post\/308456748140754",
            "keywords": [
                "Mercor",
                "Security Breach",
                "AI Companies"
            ]
        },
        {
            "date": "2026-04-01",
            "title": "Mercor Hit by Supply Chain Attack via LiteLLM Breach - The Tech Buzz",
            "category": "Data Leak",
            "summary": "An extortion group executed a supply chain attack by compromising the open-source LiteLLM project, which serves as a widely-used AI model API proxy. This breach led to the theft of sensitive data from AI recruiting startup Mercor, underscoring systemic vulnerabilities in the AI industry's reliance on unvetted open-source dependencies.",
            "link": "https:\/\/www.techbuzz.ai\/articles\/mercor-hit-by-supply-chain-attack-via-litellm-breach",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "Data Exfiltration"
            ]
        },
        {
            "date": "2026-03-31",
            "title": "Anthropic leaks its own AI coding tool\u2019s source code in second major security breach - Fortune",
            "category": "Data Leak",
            "summary": "Anthropic accidentally leaked the source code for its Claude Code AI coding tool's agentic harness, comprising 500,000 lines of code across 1,900 files, via an improper NPM package release. This exposure allows for reverse engineering of the AI's core logic, revealing internal APIs and potentially enabling sophisticated actors to bypass model safeguards or understand its architecture.",
            "link": "https:\/\/fortune.com\/2026\/03\/31\/anthropic-source-code-claude-code-data-leak-second-security-lapse-days-after-accidentally-revealing-mythos\/",
            "keywords": [
                "Source Code Leak",
                "NPM",
                "Agentic Harness"
            ]
        },
        {
            "date": "2026-04-03",
            "title": "Meta Halts Mercor Partnership After AI Training Data Breach - The Tech Buzz",
            "category": "Data Leak",
            "summary": "A security incident at AI data vendor Mercor exposed proprietary AI training data methodologies and strategies from Meta and other major AI labs. This breach represents a significant competitive intelligence leak, highlighting critical security vulnerabilities within the AI supply chain and third-party vendor relationships.",
            "link": "https:\/\/www.techbuzz.ai\/articles\/meta-halts-mercor-partnership-after-ai-training-data-breach",
            "keywords": [
                "AI Training Data",
                "Supply Chain Security",
                "Third-Party Risk"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "Mercor Hit by LiteLLM Supply Chain Attack - SecurityWeek",
            "category": "Vulnerability",
            "summary": "Mercor was reportedly impacted by a supply chain attack involving the LiteLLM component, suggesting a potential compromise of software integrity or introduction of malicious dependencies. Due to the lack of article content, specific details regarding the exploit mechanism, a CVE, or the full impact are unavailable.",
            "link": "https:\/\/www.securityweek.com\/mercor-hit-by-litellm-supply-chain-attack\/",
            "keywords": [
                "Supply Chain Attack",
                "LiteLLM",
                "Software Integrity"
            ]
        },
        {
            "date": "2026-04-03",
            "title": "New infosec products of the month: March 2026 - Help Net Security",
            "category": "Vulnerability",
            "summary": "A zero-day vulnerability (CVE-2026-35616) affecting FortiClient EMS has been actively exploited, necessitating the urgent release of emergency hotfixes. This critical flaw allows attackers to compromise enterprise management systems before a patch is widely available.",
            "link": "https:\/\/www.helpnetsecurity.com\/2026\/04\/03\/new-infosec-products-of-the-month-march-2026\/",
            "keywords": [
                "CVE-2026-35616",
                "Zero-Day",
                "FortiClient EMS"
            ]
        },
        {
            "date": "2026-04-03",
            "title": "AI Firm Mercor Confirms Breach as Hackers Claim 4TB of Stolen Data - Hackread",
            "category": "Data Leak",
            "summary": "AI firm Mercor confirmed a breach stemming from a supply chain attack involving the open-source LiteLLM PyPI package, where attackers published malicious versions after compromising maintainer credentials. This incident led to the alleged theft of 4TB of sensitive data, including candidate profiles, PII, source code, and API keys, subsequently listed by the Lapsus$ extortion group.",
            "link": "https:\/\/hackread.com\/ai-firm-mercor-breach-hackers-4tb-data\/",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "Lapsus$"
            ]
        },
        {
            "date": "2026-04-01",
            "title": "AI startup Mercor confirms security incident linked to LiteLLM supply chain attack | brief | SC Media - SC Media",
            "category": "Vulnerability",
            "summary": "The incident stems from a supply chain attack targeting the open-source LiteLLM project, where malicious code was injected. This compromise led to thousands of organizations, including AI startup Mercor, suffering data breaches and exfiltration of sensitive information.",
            "link": "https:\/\/www.scworld.com\/brief\/ai-startup-mercor-confirms-security-incident-linked-to-litellm-supply-chain-attack",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "Malicious Code Injection"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "AI recruiting biz Mercor says it was 'one of thousands' hit in LiteLLM supply-chain attack - theregister.com",
            "category": "Vulnerability",
            "summary": "A widespread supply-chain attack, orchestrated by TeamPCP, injected credential-stealing malware into popular open-source projects like Trivy, KICS, LiteLLM, and Telnyx. This compromise resulted in the exfiltration of credentials and data from over a thousand downstream SaaS environments, with Mercor publicly confirming the theft of 4 TB of its data and source code.",
            "link": "https:\/\/www.theregister.com\/2026\/04\/02\/mercor_supply_chain_attack\/",
            "keywords": [
                "LiteLLM supply-chain attack",
                "credential-stealing malware",
                "PyPI package poisoning"
            ]
        },
        {
            "date": "2026-04-01",
            "title": "Claude\u2019s code: Anthropic leaks source code for AI software engineering tool | Technology - The Guardian",
            "category": "Data Leak",
            "summary": "Anthropic accidentally exposed nearly 2,000 internal files and 500,000 lines of source code for its AI-powered coding assistant, Claude Code, due to human error during a software update. This disclosure revealed internal architecture blueprints and commercially sensitive operational instructions for their AI models, raising competitive and internal security concerns.",
            "link": "https:\/\/www.theguardian.com\/technology\/2026\/apr\/01\/anthropic-claudes-code-leaks-ai",
            "keywords": [
                "Source Code Leak",
                "Claude Code",
                "Internal Architecture"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "AI Trading Agent Vulnerability 2026: How a $45M Crypto Security Breach Exposed Protocol Risks - KuCoin",
            "category": "Vulnerability",
            "summary": "Autonomous AI trading agents in 2026 were compromised by protocol-level vulnerabilities such as memory poisoning and indirect prompt injection, targeting their long-term memory and execution protocols like the Model Context Protocol (MCP). These flaws facilitated over $45 million in crypto security breaches, including a $40 million drain from Step Finance amplified by excessive agent permissions.",
            "link": "https:\/\/www.kucoin.com\/blog\/en-ai-trading-agent-vulnerability-2026-how-a-45m-crypto-security-breach-exposed-protocol-risks",
            "keywords": [
                "Memory poisoning",
                "Indirect prompt injection",
                "Model Context Protocol (MCP)"
            ]
        },
        {
            "date": "2026-04-04",
            "title": "Claude Code Leak Weaponized With Malware in Security Crisis - The Tech Buzz",
            "category": "Malware",
            "summary": "Threat actors are weaponizing leaked Anthropic Claude AI source code by embedding malware, disguised as legitimate repositories, and distributing it to developers. This malicious distribution aims to install backdoors and credential harvesters on victims' systems, providing attackers with potential footholds into sensitive corporate and research networks.",
            "link": "https:\/\/www.techbuzz.ai\/articles\/claude-code-leak-weaponized-with-malware-in-security-crisis",
            "keywords": [
                "Claude AI",
                "Malware",
                "Supply Chain Attack"
            ]
        },
        {
            "date": "2026-04-04",
            "title": "Meta paused its work with AI training startup Mercor after a data breach - Business Insider",
            "category": "Data Leak",
            "summary": "AI training startup Mercor experienced a data breach resulting from a supply chain attack that leveraged the open-source project LiteLLM, impacting potentially thousands of companies. This incident prompted Meta to pause its collaboration with Mercor while a thorough investigation, supported by third-party forensics experts, is conducted.",
            "link": "https:\/\/www.businessinsider.com\/meta-pauses-work-mercor-ai-training-investigating-data-breach-2026-4",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "Data Breach"
            ]
        },
        {
            "date": "2026-03-31",
            "title": "Vertex AI Vulnerability Exposes Google Cloud Data and Private Artifacts - The Hacker News",
            "category": "Vulnerability",
            "summary": "A vulnerability in Google Cloud's Vertex AI platform allowed for the misuse of the Per-Project, Per-Product Service Agent (P4SA) due to excessive default permissions. This flaw enabled attackers to exfiltrate service agent credentials, gaining unauthorized read access to customer Google Cloud Storage data and proprietary container images from Google's internal Artifact Registry.",
            "link": "https:\/\/thehackernews.com\/2026\/03\/vertex-ai-vulnerability-exposes-google.html",
            "keywords": [
                "Vertex AI",
                "Excessive Permissions",
                "P4SA"
            ]
        },
        {
            "date": "2026-03-27",
            "title": "Major Security Breach Of Critical AI Dependency Exposes Cloud Secrets - Forbes",
            "category": "Automated Alert",
            "summary": "Article Content is missing. Please provide the article content for analysis.",
            "link": "https:\/\/www.forbes.com\/sites\/ronschmelzer\/2026\/03\/27\/major-security-breach-of-critical-ai-dependency-exposes-cloud-secrets\/",
            "keywords": []
        },
        {
            "date": "2026-04-01",
            "title": "Mercor says it was hit by cyberattack tied to compromise of open source LiteLLM project - TechCrunch",
            "category": "Data Leak",
            "summary": "Mercor, an AI recruiting startup, experienced a data breach following a supply chain attack on the open-source LiteLLM project, which involved the injection of malicious code into its packages. The Lapsus$ hacking group claimed responsibility for targeting Mercor and exfiltrating sensitive data, including Slack and ticketing information, as evidenced by shared samples.",
            "link": "https:\/\/techcrunch.com\/2026\/03\/31\/mercor-says-it-was-hit-by-cyberattack-tied-to-compromise-of-open-source-litellm-project\/",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "Lapsus$"
            ]
        },
        {
            "date": "2026-03-31",
            "title": "How SentinelOne\u2019s AI EDR Autonomously Discovered and Stopped Anthropic\u2019s Claude from Executing a Zero Day Supply Chain Attack, Globally - sentinelone.com",
            "category": "Vulnerability",
            "summary": "A sophisticated multi-stage supply chain attack, initiated by compromising open-source security scanner Trivy to steal LiteLLM PyPI credentials, injected malicious versions (1.82.7, 1.82.8) of LiteLLM into customer environments. This enabled data exfiltration, system persistence, and lateral movement within Kubernetes clusters, notably leveraging AI coding assistants with unrestricted permissions as an unwitting infection vector.",
            "link": "https:\/\/www.sentinelone.com\/blog\/how-sentinelones-ai-edr-autonomously-discovered-and-stopped-anthropics-claude-from-executing-a-zero-day-supply-chain-attack-globally\/",
            "keywords": [
                "LiteLLM",
                "Supply Chain Attack",
                "AI Agent Compromise"
            ]
        },
        {
            "date": "2026-04-02",
            "title": "Critical Vulnerability in Claude Code Emerges Days After Source Leak - SecurityWeek",
            "category": "Vulnerability",
            "summary": "A critical vulnerability in Anthropic's Claude Code allows for the bypass of its permission system's deny rules. This flaw can be exploited via AI-generated prompt injection, enabling attackers to exfiltrate sensitive credentials and compromise cloud or CI\/CD environments.",
            "link": "https:\/\/www.securityweek.com\/critical-vulnerability-in-claude-code-emerges-days-after-source-leak\/",
            "keywords": [
                "Prompt Injection",
                "Permission Bypass",
                "Claude Code"
            ]
        }
    ]
}