[BugFix] Update AttnFusionPass cache key (#21947)

Signed-off-by: Richard Zou <zou3519@gmail.com>
Richard Zou authored 2025-08-01 10:11:29 -04:00, committed by GitHub
parent a59cd9d9f7
commit 8026a335a1
2 changed files with 5 additions and 1 deletion

@@ -164,3 +164,6 @@ class AttnFusionPass(VllmInductorPass):
         logger.debug("Fused quantization onto %s attention nodes", count)
         self.dump_graph(graph, "after_attn_fusion")
         self.end_and_log()
+
+    def uuid(self):
+        return VllmInductorPass.hash_source(self, AttentionStaticQuantPattern)
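
For context (not part of the diff): Inductor folds each custom pass's uuid() into its
compilation cache key, so the AttnFusionPass key should also cover the pattern class it
fuses; otherwise editing AttentionStaticQuantPattern could silently reuse a stale
compiled graph. Below is a minimal sketch of that intent, using simplified stand-ins
rather than the real vLLM sources:

# Sketch only: source_digest and both classes are hypothetical stand-ins.
import hashlib
import inspect


def source_digest(*objs):
    """Hash the source of the given classes/instances (simplified hash_source)."""
    hasher = hashlib.sha256()
    for obj in objs:
        cls = obj if isinstance(obj, type) else obj.__class__
        hasher.update(inspect.getsource(cls).encode("utf-8"))
    return hasher.hexdigest()


class AttentionStaticQuantPattern:  # stand-in for the real pattern class
    pass


class AttnFusionPass:  # stand-in for the real pass
    def uuid(self):
        # Cover both the pass implementation and the pattern it fuses, so
        # editing either one changes the cache key and forces recompilation.
        return source_digest(self, AttentionStaticQuantPattern)


if __name__ == "__main__":
    print(AttnFusionPass().uuid())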

@@ -76,9 +76,10 @@ class InductorPass(CustomGraphPass):
         for src in srcs:
             if isinstance(src, str):
                 src_str = src
-            elif isinstance(src, types.FunctionType):
+            elif isinstance(src, (types.FunctionType, type)):
                 src_str = inspect.getsource(src)
             else:
                 # object instance
                 src_str = inspect.getsource(src.__class__)
             hasher.update(src_str.encode("utf-8"))
         return hasher.hexdigest()
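
Why the extra type check matters: the new uuid() above passes the
AttentionStaticQuantPattern class itself, and a class is neither a str nor a
types.FunctionType, so without this change it would fall through to the else branch
and hash inspect.getsource(src.__class__), i.e. the builtin metaclass type, which
raises TypeError. A self-contained sketch of the updated behavior follows (the
hash_source_sketch helper, helper() function, and Demo class are hypothetical,
for illustration only):

import hashlib
import inspect
import types


def hash_source_sketch(*srcs):
    """Simplified version of the hash_source logic shown in the diff above."""
    hasher = hashlib.sha256()
    for src in srcs:
        if isinstance(src, str):
            src_str = src  # raw strings are hashed as-is
        elif isinstance(src, (types.FunctionType, type)):
            # Functions *and* classes are hashed via their own source text.
            src_str = inspect.getsource(src)
        else:
            # Any other object: hash the source of its class.
            src_str = inspect.getsource(src.__class__)
        hasher.update(src_str.encode("utf-8"))
    return hasher.hexdigest()


def helper():
    return 1


class Demo:
    pass


# Exercises each branch: a string, a function, a class, and an instance.
print(hash_source_sketch("vllm", helper, Demo, Demo()))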