Skip to content

Commit a341b53

Browse files
yiyixuxuyiyixuxu
andauthored
disable test_conversion_when_using_device_map (#7620)
* disable test * update --------- Co-authored-by: yiyixuxu <yixu310@gmail,com>
1 parent 8e46d97 commit a341b53

File tree

1 file changed

+39
-40
lines changed

1 file changed

+39
-40
lines changed

tests/models/test_attention_processor.py

Lines changed: 39 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,7 @@
1-
import tempfile
21
import unittest
32

4-
import numpy as np
53
import torch
64

7-
from diffusers import DiffusionPipeline
85
from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
96

107

@@ -80,40 +77,42 @@ def test_only_cross_attention(self):
8077

8178
class DeprecatedAttentionBlockTests(unittest.TestCase):
8279
def test_conversion_when_using_device_map(self):
83-
pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)
84-
85-
pre_conversion = pipe(
86-
"foo",
87-
num_inference_steps=2,
88-
generator=torch.Generator("cpu").manual_seed(0),
89-
output_type="np",
90-
).images
91-
92-
# the initial conversion succeeds
93-
pipe = DiffusionPipeline.from_pretrained(
94-
"hf-internal-testing/tiny-stable-diffusion-pipe", device_map="sequential", safety_checker=None
95-
)
96-
97-
conversion = pipe(
98-
"foo",
99-
num_inference_steps=2,
100-
generator=torch.Generator("cpu").manual_seed(0),
101-
output_type="np",
102-
).images
103-
104-
with tempfile.TemporaryDirectory() as tmpdir:
105-
# save the converted model
106-
pipe.save_pretrained(tmpdir)
107-
108-
# can also load the converted weights
109-
pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="sequential", safety_checker=None)
110-
111-
after_conversion = pipe(
112-
"foo",
113-
num_inference_steps=2,
114-
generator=torch.Generator("cpu").manual_seed(0),
115-
output_type="np",
116-
).images
117-
118-
self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-5))
119-
self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-5))
80+
# To-DO for Sayak: enable this test again and to test `device_map='balanced'` once we have this in accelerate https://github.com/huggingface/accelerate/pull/2641
81+
pass
82+
# pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)
83+
84+
# pre_conversion = pipe(
85+
# "foo",
86+
# num_inference_steps=2,
87+
# generator=torch.Generator("cpu").manual_seed(0),
88+
# output_type="np",
89+
# ).images
90+
91+
# # the initial conversion succeeds
92+
# pipe = DiffusionPipeline.from_pretrained(
93+
# "hf-internal-testing/tiny-stable-diffusion-pipe", device_map="sequential", safety_checker=None
94+
# )
95+
96+
# conversion = pipe(
97+
# "foo",
98+
# num_inference_steps=2,
99+
# generator=torch.Generator("cpu").manual_seed(0),
100+
# output_type="np",
101+
# ).images
102+
103+
# with tempfile.TemporaryDirectory() as tmpdir:
104+
# # save the converted model
105+
# pipe.save_pretrained(tmpdir)
106+
107+
# # can also load the converted weights
108+
# pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="sequential", safety_checker=None)
109+
110+
# after_conversion = pipe(
111+
# "foo",
112+
# num_inference_steps=2,
113+
# generator=torch.Generator("cpu").manual_seed(0),
114+
# output_type="np",
115+
# ).images
116+
117+
# self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-5))
118+
# self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-5))

0 commit comments

Comments
 (0)