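"""Convert SkyReels-V2 checkpoints released by Skywork into the diffusers format.

Minimal usage sketch (the script filename and the output directory are illustrative,
not fixed by this file):

    python convert_skyreelsv2_to_diffusers.py --model_type SkyReels-V2-DF-1.3B-540P --output_path ./converted --dtype bf16
"""
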
import argparse
import os
import pathlib
from typing import Any, Dict

import torch
from accelerate import init_empty_weights
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from transformers import AutoProcessor, AutoTokenizer, CLIPVisionModelWithProjection, UMT5EncoderModel

from diffusers import (
    AutoencoderKLWan,
    SkyReelsV2DiffusionForcingPipeline,
    SkyReelsV2ImageToVideoPipeline,
    SkyReelsV2Pipeline,
    SkyReelsV2Transformer3DModel,
    UniPCMultistepScheduler,
)


TRANSFORMER_KEYS_RENAME_DICT = {
    "time_embedding.0": "condition_embedder.time_embedder.linear_1",
    "time_embedding.2": "condition_embedder.time_embedder.linear_2",
    "text_embedding.0": "condition_embedder.text_embedder.linear_1",
    "text_embedding.2": "condition_embedder.text_embedder.linear_2",
    "time_projection.1": "condition_embedder.time_proj",
    "head.modulation": "scale_shift_table",
    "head.head": "proj_out",
    "modulation": "scale_shift_table",
    "ffn.0": "ffn.net.0.proj",
    "ffn.2": "ffn.net.2",
    "fps_projection.0": "fps_projection.net.0.proj",
    "fps_projection.2": "fps_projection.net.2",
    # Hack to swap the layer names.
    # The original model calls the norms in the following order: norm1, norm3, norm2.
    # We convert it to: norm1, norm2, norm3.
    "norm2": "norm__placeholder",
    "norm3": "norm2",
    "norm__placeholder": "norm3",
    # For the I2V model
    "img_emb.proj.0": "condition_embedder.image_embedder.norm1",
    "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj",
    "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2",
    "img_emb.proj.4": "condition_embedder.image_embedder.norm2",
    # For the FLF2V model
    "img_emb.emb_pos": "condition_embedder.image_embedder.pos_embed",
    # Attention component mappings
    "self_attn.q": "attn1.to_q",
    "self_attn.k": "attn1.to_k",
    "self_attn.v": "attn1.to_v",
    "self_attn.o": "attn1.to_out.0",
    "self_attn.norm_q": "attn1.norm_q",
    "self_attn.norm_k": "attn1.norm_k",
    "cross_attn.q": "attn2.to_q",
    "cross_attn.k": "attn2.to_k",
    "cross_attn.v": "attn2.to_v",
    "cross_attn.o": "attn2.to_out.0",
    "cross_attn.norm_q": "attn2.norm_q",
    "cross_attn.norm_k": "attn2.norm_k",
    "attn2.to_k_img": "attn2.add_k_proj",
    "attn2.to_v_img": "attn2.add_v_proj",
    "attn2.norm_k_img": "attn2.norm_added_k",
}
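
# No per-key tensor rewrites are needed for SkyReels-V2; the empty dict is kept so the
# conversion loop below stays in the same shape as the related Wan conversion scripts.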
TRANSFORMER_SPECIAL_KEYS_REMAP = {}


def update_state_dict_(state_dict: Dict[str, Any], old_key: str, new_key: str) -> None:
    state_dict[new_key] = state_dict.pop(old_key)


def load_sharded_safetensors(directory: pathlib.Path) -> Dict[str, torch.Tensor]:
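    # The 720P repos shard the transformer as "diffusion_pytorch_model-*.safetensors",
    # while the 540P repos use "model-*.safetensors".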
    if "720P" in str(directory):
        file_paths = list(directory.glob("diffusion_pytorch_model*.safetensors"))
    else:
        file_paths = list(directory.glob("model*.safetensors"))
    state_dict = {}
    for path in file_paths:
        state_dict.update(load_file(path))
    return state_dict


def get_transformer_config(model_type: str) -> Dict[str, Any]:
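    # Each entry pairs the original Skywork Hub repo with the matching
    # SkyReelsV2Transformer3DModel config. Only the DF 1.3B variant injects FPS
    # conditioning (inject_sample_info=True); the I2V/FLF2V variants add image
    # cross-attention (added_kv_proj_dim, image_dim), and FLF2V additionally uses a
    # learned positional embedding (pos_embed_seq_len=514) on top of the I2V weights.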
    if model_type == "SkyReels-V2-DF-1.3B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-DF-1.3B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": None,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 8960,
                "freq_dim": 256,
                "in_channels": 16,
                "num_attention_heads": 12,
                "inject_sample_info": True,
                "num_layers": 30,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
            },
        }
    elif model_type == "SkyReels-V2-DF-14B-720P":
        config = {
            "model_id": "Skywork/SkyReels-V2-DF-14B-720P",
            "diffusers_config": {
                "added_kv_proj_dim": None,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 16,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
            },
        }
    elif model_type == "SkyReels-V2-DF-14B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-DF-14B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": None,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 16,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
            },
        }
    elif model_type == "SkyReels-V2-T2V-14B-720P":
        config = {
            "model_id": "Skywork/SkyReels-V2-T2V-14B-720P",
            "diffusers_config": {
                "added_kv_proj_dim": None,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 16,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
            },
        }
    elif model_type == "SkyReels-V2-T2V-14B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-T2V-14B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": None,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 16,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
            },
        }
    elif model_type == "SkyReels-V2-I2V-1.3B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-I2V-1.3B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": 1536,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 8960,
                "freq_dim": 256,
                "in_channels": 36,
                "num_attention_heads": 12,
                "inject_sample_info": False,
                "num_layers": 30,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
                "image_dim": 1280,
            },
        }
    elif model_type == "SkyReels-V2-I2V-14B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-I2V-14B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": 5120,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 36,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
                "image_dim": 1280,
            },
        }
    elif model_type == "SkyReels-V2-I2V-14B-720P":
        config = {
            "model_id": "Skywork/SkyReels-V2-I2V-14B-720P",
            "diffusers_config": {
                "added_kv_proj_dim": 5120,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 36,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
                "image_dim": 1280,
            },
        }
    elif model_type == "SkyReels-V2-FLF2V-1.3B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-I2V-1.3B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": 1536,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 8960,
                "freq_dim": 256,
                "in_channels": 36,
                "num_attention_heads": 12,
                "inject_sample_info": False,
                "num_layers": 30,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
                "image_dim": 1280,
                "pos_embed_seq_len": 514,
            },
        }
    elif model_type == "SkyReels-V2-FLF2V-14B-540P":
        config = {
            "model_id": "Skywork/SkyReels-V2-I2V-14B-540P",
            "diffusers_config": {
                "added_kv_proj_dim": 5120,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 36,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
                "image_dim": 1280,
                "pos_embed_seq_len": 514,
            },
        }
    elif model_type == "SkyReels-V2-FLF2V-14B-720P":
        config = {
            "model_id": "Skywork/SkyReels-V2-I2V-14B-720P",
            "diffusers_config": {
                "added_kv_proj_dim": 5120,
                "attention_head_dim": 128,
                "cross_attn_norm": True,
                "eps": 1e-06,
                "ffn_dim": 13824,
                "freq_dim": 256,
                "in_channels": 36,
                "num_attention_heads": 40,
                "inject_sample_info": False,
                "num_layers": 40,
                "out_channels": 16,
                "patch_size": [1, 2, 2],
                "qk_norm": "rms_norm_across_heads",
                "text_dim": 4096,
                "image_dim": 1280,
                "pos_embed_seq_len": 514,
            },
        }
    else:
        raise ValueError(f"Unsupported model_type: {model_type}")
    return config


def convert_transformer(model_type: str):
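    # Fetch the original checkpoint for `model_type`, rename every key to the diffusers
    # layout, and materialize the tensors into a meta-initialized transformer via
    # load_state_dict(..., assign=True).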
    config = get_transformer_config(model_type)
    diffusers_config = config["diffusers_config"]
    model_id = config["model_id"]

    if "1.3B" in model_type:
        original_state_dict = load_file(hf_hub_download(model_id, "model.safetensors"))
    else:
        os.makedirs(model_type, exist_ok=True)
        model_dir = pathlib.Path(model_type)
        if "720P" in model_type:
            top_shard = 7 if "I2V" in model_type else 6
            model_name = "diffusion_pytorch_model"
        elif "540P" in model_type:
            top_shard = 14 if "I2V" in model_type else 12
            model_name = "model"
        # Zero-pad the shard count to five digits, e.g. "model-00001-of-00014.safetensors".
        zeros = "0" * (5 - len(str(top_shard)))

        for i in range(1, top_shard + 1):
            shard_path = f"{model_name}-{i:05d}-of-{zeros}{top_shard}.safetensors"
            hf_hub_download(model_id, shard_path, local_dir=model_dir)
        original_state_dict = load_sharded_safetensors(model_dir)

    with init_empty_weights():
        transformer = SkyReelsV2Transformer3DModel.from_config(diffusers_config)

    for key in list(original_state_dict.keys()):
        new_key = key[:]
        for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items():
            new_key = new_key.replace(replace_key, rename_key)
        update_state_dict_(original_state_dict, key, new_key)

    for key in list(original_state_dict.keys()):
        for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items():
            if special_key not in key:
                continue
            handler_fn_inplace(key, original_state_dict)

    if "FLF2V" in model_type:
        # The FLF2V variants reuse the I2V weights, which do not ship the learned image
        # positional embedding, so zero-initialize it to satisfy the strict load below.
        if (
            hasattr(transformer.condition_embedder, "image_embedder")
            and hasattr(transformer.condition_embedder.image_embedder, "pos_embed")
            and transformer.condition_embedder.image_embedder.pos_embed is not None
        ):
            pos_embed_shape = transformer.condition_embedder.image_embedder.pos_embed.shape
            original_state_dict["condition_embedder.image_embedder.pos_embed"] = torch.zeros(pos_embed_shape)

    transformer.load_state_dict(original_state_dict, strict=True, assign=True)
    return transformer


def convert_vae():
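    # Convert the original Wan 2.1 VAE checkpoint, which SkyReels-V2 builds on, to the
    # AutoencoderKLWan parameter naming.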
    vae_ckpt_path = hf_hub_download("Wan-AI/Wan2.1-T2V-14B", "Wan2.1_VAE.pth")
    old_state_dict = torch.load(vae_ckpt_path, weights_only=True)
    new_state_dict = {}

    # Create mappings for specific components
    middle_key_mapping = {
        # Encoder middle block
        "encoder.middle.0.residual.0.gamma": "encoder.mid_block.resnets.0.norm1.gamma",
        "encoder.middle.0.residual.2.bias": "encoder.mid_block.resnets.0.conv1.bias",
        "encoder.middle.0.residual.2.weight": "encoder.mid_block.resnets.0.conv1.weight",
        "encoder.middle.0.residual.3.gamma": "encoder.mid_block.resnets.0.norm2.gamma",
        "encoder.middle.0.residual.6.bias": "encoder.mid_block.resnets.0.conv2.bias",
        "encoder.middle.0.residual.6.weight": "encoder.mid_block.resnets.0.conv2.weight",
        "encoder.middle.2.residual.0.gamma": "encoder.mid_block.resnets.1.norm1.gamma",
        "encoder.middle.2.residual.2.bias": "encoder.mid_block.resnets.1.conv1.bias",
        "encoder.middle.2.residual.2.weight": "encoder.mid_block.resnets.1.conv1.weight",
        "encoder.middle.2.residual.3.gamma": "encoder.mid_block.resnets.1.norm2.gamma",
        "encoder.middle.2.residual.6.bias": "encoder.mid_block.resnets.1.conv2.bias",
        "encoder.middle.2.residual.6.weight": "encoder.mid_block.resnets.1.conv2.weight",
        # Decoder middle block
        "decoder.middle.0.residual.0.gamma": "decoder.mid_block.resnets.0.norm1.gamma",
        "decoder.middle.0.residual.2.bias": "decoder.mid_block.resnets.0.conv1.bias",
        "decoder.middle.0.residual.2.weight": "decoder.mid_block.resnets.0.conv1.weight",
        "decoder.middle.0.residual.3.gamma": "decoder.mid_block.resnets.0.norm2.gamma",
        "decoder.middle.0.residual.6.bias": "decoder.mid_block.resnets.0.conv2.bias",
        "decoder.middle.0.residual.6.weight": "decoder.mid_block.resnets.0.conv2.weight",
        "decoder.middle.2.residual.0.gamma": "decoder.mid_block.resnets.1.norm1.gamma",
        "decoder.middle.2.residual.2.bias": "decoder.mid_block.resnets.1.conv1.bias",
        "decoder.middle.2.residual.2.weight": "decoder.mid_block.resnets.1.conv1.weight",
        "decoder.middle.2.residual.3.gamma": "decoder.mid_block.resnets.1.norm2.gamma",
        "decoder.middle.2.residual.6.bias": "decoder.mid_block.resnets.1.conv2.bias",
        "decoder.middle.2.residual.6.weight": "decoder.mid_block.resnets.1.conv2.weight",
    }

    # Create a mapping for the attention blocks
    attention_mapping = {
        # Encoder middle attention
        "encoder.middle.1.norm.gamma": "encoder.mid_block.attentions.0.norm.gamma",
        "encoder.middle.1.to_qkv.weight": "encoder.mid_block.attentions.0.to_qkv.weight",
        "encoder.middle.1.to_qkv.bias": "encoder.mid_block.attentions.0.to_qkv.bias",
        "encoder.middle.1.proj.weight": "encoder.mid_block.attentions.0.proj.weight",
        "encoder.middle.1.proj.bias": "encoder.mid_block.attentions.0.proj.bias",
        # Decoder middle attention
        "decoder.middle.1.norm.gamma": "decoder.mid_block.attentions.0.norm.gamma",
        "decoder.middle.1.to_qkv.weight": "decoder.mid_block.attentions.0.to_qkv.weight",
        "decoder.middle.1.to_qkv.bias": "decoder.mid_block.attentions.0.to_qkv.bias",
        "decoder.middle.1.proj.weight": "decoder.mid_block.attentions.0.proj.weight",
        "decoder.middle.1.proj.bias": "decoder.mid_block.attentions.0.proj.bias",
    }

    # Create a mapping for the head components
    head_mapping = {
        # Encoder head
        "encoder.head.0.gamma": "encoder.norm_out.gamma",
        "encoder.head.2.bias": "encoder.conv_out.bias",
        "encoder.head.2.weight": "encoder.conv_out.weight",
        # Decoder head
        "decoder.head.0.gamma": "decoder.norm_out.gamma",
        "decoder.head.2.bias": "decoder.conv_out.bias",
        "decoder.head.2.weight": "decoder.conv_out.weight",
    }

    # Create a mapping for the quant components
    quant_mapping = {
        "conv1.weight": "quant_conv.weight",
        "conv1.bias": "quant_conv.bias",
        "conv2.weight": "post_quant_conv.weight",
        "conv2.bias": "post_quant_conv.bias",
    }

    # Process each key in the state dict
    for key, value in old_state_dict.items():
        # Handle middle block keys using the mapping
        if key in middle_key_mapping:
            new_key = middle_key_mapping[key]
            new_state_dict[new_key] = value
        # Handle attention blocks using the mapping
        elif key in attention_mapping:
            new_key = attention_mapping[key]
            new_state_dict[new_key] = value
        # Handle head keys using the mapping
        elif key in head_mapping:
            new_key = head_mapping[key]
            new_state_dict[new_key] = value
        # Handle quant keys using the mapping
        elif key in quant_mapping:
            new_key = quant_mapping[key]
            new_state_dict[new_key] = value
        # Handle encoder conv1
        elif key == "encoder.conv1.weight":
            new_state_dict["encoder.conv_in.weight"] = value
        elif key == "encoder.conv1.bias":
            new_state_dict["encoder.conv_in.bias"] = value
        # Handle decoder conv1
        elif key == "decoder.conv1.weight":
            new_state_dict["decoder.conv_in.weight"] = value
        elif key == "decoder.conv1.bias":
            new_state_dict["decoder.conv_in.bias"] = value
        # Handle encoder downsamples
        elif key.startswith("encoder.downsamples."):
            # Convert to down_blocks
            new_key = key.replace("encoder.downsamples.", "encoder.down_blocks.")

            # Convert residual block naming but keep the original structure
            if ".residual.0.gamma" in new_key:
                new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma")
            elif ".residual.2.bias" in new_key:
                new_key = new_key.replace(".residual.2.bias", ".conv1.bias")
            elif ".residual.2.weight" in new_key:
                new_key = new_key.replace(".residual.2.weight", ".conv1.weight")
            elif ".residual.3.gamma" in new_key:
                new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma")
            elif ".residual.6.bias" in new_key:
                new_key = new_key.replace(".residual.6.bias", ".conv2.bias")
            elif ".residual.6.weight" in new_key:
                new_key = new_key.replace(".residual.6.weight", ".conv2.weight")
            elif ".shortcut.bias" in new_key:
                new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias")
            elif ".shortcut.weight" in new_key:
                new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight")

            new_state_dict[new_key] = value

        # Handle decoder upsamples
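        # The original decoder stores everything in one flat list: resnets sit at
        # indices 0-2, 4-6, 8-10, 12-14 and the upsamplers at indices 3, 7, 11.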
elif key.startswith("decoder.upsamples."):
|
|
# Convert to up_blocks
|
|
parts = key.split(".")
|
|
block_idx = int(parts[2])
|
|
|
|
# Group residual blocks
|
|
if "residual" in key:
|
|
if block_idx in [0, 1, 2]:
|
|
new_block_idx = 0
|
|
resnet_idx = block_idx
|
|
elif block_idx in [4, 5, 6]:
|
|
new_block_idx = 1
|
|
resnet_idx = block_idx - 4
|
|
elif block_idx in [8, 9, 10]:
|
|
new_block_idx = 2
|
|
resnet_idx = block_idx - 8
|
|
elif block_idx in [12, 13, 14]:
|
|
new_block_idx = 3
|
|
resnet_idx = block_idx - 12
|
|
else:
|
|
# Keep as is for other blocks
|
|
new_state_dict[key] = value
|
|
continue
|
|
|
|
# Convert residual block naming
|
|
if ".residual.0.gamma" in key:
|
|
new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm1.gamma"
|
|
elif ".residual.2.bias" in key:
|
|
new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.bias"
|
|
elif ".residual.2.weight" in key:
|
|
new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.weight"
|
|
elif ".residual.3.gamma" in key:
|
|
new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm2.gamma"
|
|
elif ".residual.6.bias" in key:
|
|
new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.bias"
|
|
elif ".residual.6.weight" in key:
|
|
new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.weight"
|
|
else:
|
|
new_key = key
|
|
|
|
new_state_dict[new_key] = value
|
|
|
|
# Handle shortcut connections
|
|
elif ".shortcut." in key:
|
|
if block_idx == 4:
|
|
new_key = key.replace(".shortcut.", ".resnets.0.conv_shortcut.")
|
|
new_key = new_key.replace("decoder.upsamples.4", "decoder.up_blocks.1")
|
|
else:
|
|
new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.")
|
|
new_key = new_key.replace(".shortcut.", ".conv_shortcut.")
|
|
|
|
new_state_dict[new_key] = value
|
|
|
|
# Handle upsamplers
|
|
elif ".resample." in key or ".time_conv." in key:
|
|
if block_idx == 3:
|
|
new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.0.upsamplers.0")
|
|
elif block_idx == 7:
|
|
new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.1.upsamplers.0")
|
|
elif block_idx == 11:
|
|
new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.2.upsamplers.0")
|
|
else:
|
|
new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.")
|
|
|
|
new_state_dict[new_key] = value
|
|
else:
|
|
new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.")
|
|
new_state_dict[new_key] = value
|
|
else:
|
|
# Keep other keys unchanged
|
|
new_state_dict[key] = value
|
|
|
|
with init_empty_weights():
|
|
vae = AutoencoderKLWan()
|
|
vae.load_state_dict(new_state_dict, strict=True, assign=True)
|
|
return vae
|
|
|
|
|
|
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_type", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    parser.add_argument("--dtype", default="fp32", choices=list(DTYPE_MAPPING.keys()))
    return parser.parse_args()


DTYPE_MAPPING = {
    "fp32": torch.float32,
    "fp16": torch.float16,
    "bf16": torch.bfloat16,
}


if __name__ == "__main__":
    args = get_args()

    dtype = DTYPE_MAPPING[args.dtype]
    transformer = convert_transformer(args.model_type).to(dtype=dtype)
    vae = convert_vae()
    text_encoder = UMT5EncoderModel.from_pretrained("google/umt5-xxl")
    tokenizer = AutoTokenizer.from_pretrained("google/umt5-xxl")
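    # SkyReels-V2 is flow-matching based, so UniPC is configured for flow prediction
    # with flow sigmas rather than the default epsilon prediction.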
    scheduler = UniPCMultistepScheduler(
        prediction_type="flow_prediction",
        num_train_timesteps=1000,
        use_flow_sigmas=True,
    )

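    # FLF2V checkpoints are served by the image-to-video pipeline; diffusion-forcing
    # (DF) checkpoints get their own pipeline class.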
    if "I2V" in args.model_type or "FLF2V" in args.model_type:
        image_encoder = CLIPVisionModelWithProjection.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
        image_processor = AutoProcessor.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
        pipe = SkyReelsV2ImageToVideoPipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
            image_encoder=image_encoder,
            image_processor=image_processor,
        )
    elif "T2V" in args.model_type:
        pipe = SkyReelsV2Pipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
        )
    elif "DF" in args.model_type:
        pipe = SkyReelsV2DiffusionForcingPipeline(
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            vae=vae,
            scheduler=scheduler,
        )
    else:
        raise ValueError(f"Unsupported model_type: {args.model_type}")

    pipe.save_pretrained(
        args.output_path,
        safe_serialization=True,
        max_shard_size="5GB",
        # push_to_hub=True,
        # repo_id=f"<place_holder>/{args.model_type}-Diffusers",
    )