Skip to content

Commit c27603c

Browse files
Merge branch 'main' into qwen-image-edit-controlnet
2 parents 5b1c134 + 78031c2 commit c27603c

File tree

15 files changed

+937
-7
lines changed

15 files changed

+937
-7
lines changed

examples/server/README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@ This guide will show you how to use the [`StableDiffusion3Pipeline`] in a server
99
Start by navigating to the `examples/server` folder and installing all of the dependencies.
1010

1111
```py
12-
pip install .
13-
pip install -f requirements.txt
12+
pip install diffusers
13+
pip install -r requirements.txt
1414
```
1515

1616
Launch the server with the following command.

examples/server/requirements.in

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,4 +6,5 @@ py-consul
66
prometheus_client >= 0.18.0
77
prometheus-fastapi-instrumentator >= 7.0.0
88
fastapi
9-
uvicorn
9+
uvicorn
10+
accelerate

examples/server/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ fsspec==2024.10.0
3939
# torch
4040
h11==0.14.0
4141
# via uvicorn
42-
huggingface-hub==0.26.1
42+
huggingface-hub==0.35.0
4343
# via
4444
# tokenizers
4545
# transformers

src/diffusers/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -516,6 +516,7 @@
516516
"QwenImageEditControlNetPipeline",
517517
"QwenImageEditInpaintPipeline",
518518
"QwenImageEditPipeline",
519+
"QwenImageEditPlusPipeline",
519520
"QwenImageImg2ImgPipeline",
520521
"QwenImageInpaintPipeline",
521522
"QwenImagePipeline",
@@ -1172,6 +1173,7 @@
11721173
QwenImageEditControlNetPipeline,
11731174
QwenImageEditInpaintPipeline,
11741175
QwenImageEditPipeline,
1176+
QwenImageEditPlusPipeline,
11751177
QwenImageImg2ImgPipeline,
11761178
QwenImageInpaintPipeline,
11771179
QwenImagePipeline,

src/diffusers/models/attention.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ def set_use_memory_efficient_attention_xformers(
241241
op_fw, op_bw = attention_op
242242
dtype, *_ = op_fw.SUPPORTED_DTYPES
243243
q = torch.randn((1, 2, 40), device="cuda", dtype=dtype)
244-
_ = xops.memory_efficient_attention(q, q, q)
244+
_ = xops.ops.memory_efficient_attention(q, q, q)
245245
except Exception as e:
246246
raise e
247247

src/diffusers/models/autoencoders/autoencoder_dc.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -617,7 +617,7 @@ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutp
617617
returned.
618618
"""
619619
if self.use_slicing and z.size(0) > 1:
620-
decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
620+
decoded_slices = [self._decode(z_slice) for z_slice in z.split(1)]
621621
decoded = torch.cat(decoded_slices)
622622
else:
623623
decoded = self._decode(z)

src/diffusers/pipelines/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -394,6 +394,7 @@
394394
"QwenImageImg2ImgPipeline",
395395
"QwenImageInpaintPipeline",
396396
"QwenImageEditPipeline",
397+
"QwenImageEditPlusPipeline",
397398
"QwenImageEditInpaintPipeline",
398399
"QwenImageEditControlNetPipeline",
399400
"QwenImageControlNetInpaintPipeline",
@@ -723,6 +724,7 @@
723724
QwenImageEditControlNetPipeline,
724725
QwenImageEditInpaintPipeline,
725726
QwenImageEditPipeline,
727+
QwenImageEditPlusPipeline,
726728
QwenImageImg2ImgPipeline,
727729
QwenImageInpaintPipeline,
728730
QwenImagePipeline,

src/diffusers/pipelines/qwenimage/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
_import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"]
3030
_import_structure["pipeline_qwenimage_edit_controlnet"] = ["QwenImageEditControlNetPipeline"]
3131
_import_structure["pipeline_qwenimage_edit_inpaint"] = ["QwenImageEditInpaintPipeline"]
32+
_import_structure["pipeline_qwenimage_edit_plus"] = ["QwenImageEditPlusPipeline"]
3233
_import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"]
3334
_import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"]
3435

@@ -45,6 +46,7 @@
4546
from .pipeline_qwenimage_edit import QwenImageEditPipeline
4647
from .pipeline_qwenimage_edit_controlnet import QwenImageEditControlNetPipeline
4748
from .pipeline_qwenimage_edit_inpaint import QwenImageEditInpaintPipeline
49+
from .pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
4850
from .pipeline_qwenimage_img2img import QwenImageImg2ImgPipeline
4951
from .pipeline_qwenimage_inpaint import QwenImageInpaintPipeline
5052
else:

src/diffusers/pipelines/qwenimage/pipeline_qwenimage_edit.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,6 @@ def __init__(
208208
# QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible
209209
# by the patch size. So the vae scale factor is multiplied by the patch size to account for this
210210
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
211-
self.vl_processor = processor
212211
self.tokenizer_max_length = 1024
213212

214213
self.prompt_template_encode = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"

0 commit comments

Comments (0)