From b1fd26fe9e55163f780bf9e5f56bf9bf5f035c93 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 4 Jun 2024 17:44:14 -0400
Subject: [PATCH] pytorch xpu should be flash or mem efficient attention?

---
 comfy/model_management.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 3b9fad36..a5142d30 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -693,6 +693,8 @@ def pytorch_attention_flash_attention():
         #TODO: more reliable way of checking for flash attention?
         if is_nvidia(): #pytorch flash attention only works on Nvidia
             return True
+        if is_intel_xpu():
+            return True
     return False
 
 def force_upcast_attention_dtype():
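
A minimal, self-contained sketch of how the patched check reads, assembled from the hunk context above. The helper bodies for is_nvidia() and is_intel_xpu() below are simplified stand-ins written for illustration (assumptions, not the actual ComfyUI implementations), and surrounding code not shown in the hunk is omitted.

import torch

def is_nvidia():
    # Assumption for illustration: treat any available CUDA device as an Nvidia GPU.
    return torch.cuda.is_available()

def is_intel_xpu():
    # Assumption for illustration: probe the Intel XPU backend exposed by recent PyTorch builds.
    return hasattr(torch, "xpu") and torch.xpu.is_available()

def pytorch_attention_flash_attention():
    #TODO: more reliable way of checking for flash attention?
    if is_nvidia(): #pytorch flash attention only works on Nvidia
        return True
    # New in this patch: also report flash / mem-efficient attention support on Intel XPU.
    if is_intel_xpu():
        return True
    return False

if __name__ == "__main__":
    print("flash/mem-efficient attention available:", pytorch_attention_flash_attention())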