{
  "metadata": {
    "total_size": 2409249296,
    "total_parameters": 602199429
  },
  "weight_map": {
    "audio_codec.decoder.blocks.0.block_0.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_11.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_11.weight": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_3.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_3.weight": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_4.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_5.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_6.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_6.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_6.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_6.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_7.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_7.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_7.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_7.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.0.block_8.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_0.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_11.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_11.weight": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_3.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_3.weight": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_4.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_5.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_6.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_6.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_6.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_6.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_7.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_7.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_7.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_7.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.1.block_8.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_0.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_11.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_11.weight": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_3.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_3.weight": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_4.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_5.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_6.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_6.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_6.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_6.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_7.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_7.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_7.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_7.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.2.block_8.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_0.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_11.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_11.weight": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_3.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_3.weight": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_4.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_5.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_6.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_6.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_6.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_6.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_7.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_7.conv1.weight": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_7.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_7.conv2.weight": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.act1.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.act2.alpha": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.conv1.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.conv1.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.conv1.weight_v": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.conv2.bias": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.conv2.weight_g": "model.safetensors",
    "audio_codec.decoder.blocks.3.block_8.conv2.weight_v": "model.safetensors",
    "audio_codec.decoder.conv_in.bias": "model.safetensors",
    "audio_codec.decoder.conv_in.weight_g": "model.safetensors",
    "audio_codec.decoder.conv_in.weight_v": "model.safetensors",
    "audio_codec.decoder.conv_out.bias": "model.safetensors",
    "audio_codec.decoder.conv_out.weight_g": "model.safetensors",
    "audio_codec.decoder.conv_out.weight_v": "model.safetensors",
    "audio_codec.decoder.snake_out.alpha": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.post_1.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.post_1.weight": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_0.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_0.weight": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_1.lstm.layers.0.Wh": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_1.lstm.layers.0.Wx": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_1.lstm.layers.0.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_1.lstm.layers.1.Wh": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_1.lstm.layers.1.Wx": "model.safetensors",
    "audio_codec.decoder.wm_model.decoder_block.pre_1.lstm.layers.1.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_0.lstm.layers.0.Wh": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_0.lstm.layers.0.Wx": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_0.lstm.layers.0.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_0.lstm.layers.1.Wh": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_0.lstm.layers.1.Wx": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_0.lstm.layers.1.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_2.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.post_2.weight": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.pre_3.bias": "model.safetensors",
    "audio_codec.decoder.wm_model.encoder_block.pre_3.weight": "model.safetensors",
    "audio_codec.decoder.wm_model.msg_processor.msg_processor.weight": "model.safetensors",
    "audio_codec.encoder.blocks.0.conv.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.conv.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.conv.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.res1.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.res2.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.0.res3.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.0.snake.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.conv.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.conv.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.conv.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.res1.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.res2.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.1.res3.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.1.snake.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.conv.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.conv.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.conv.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.res1.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.res2.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.2.res3.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.2.snake.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.conv.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.conv.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.conv.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.res1.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.res2.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.act1.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.act2.alpha": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.conv1.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.conv1.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.conv1.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.conv2.bias": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.conv2.weight_g": "model.safetensors",
    "audio_codec.encoder.blocks.3.res3.conv2.weight_v": "model.safetensors",
    "audio_codec.encoder.blocks.3.snake.alpha": "model.safetensors",
    "audio_codec.encoder.conv_in.bias": "model.safetensors",
    "audio_codec.encoder.conv_in.weight_g": "model.safetensors",
    "audio_codec.encoder.conv_in.weight_v": "model.safetensors",
    "audio_codec.encoder.conv_out.bias": "model.safetensors",
    "audio_codec.encoder.conv_out.weight_g": "model.safetensors",
    "audio_codec.encoder.conv_out.weight_v": "model.safetensors",
    "audio_codec.encoder.snake_out.alpha": "model.safetensors",
    "audio_codec.quantizer_in_proj.bias": "model.safetensors",
    "audio_codec.quantizer_in_proj.weight_g": "model.safetensors",
    "audio_codec.quantizer_in_proj.weight_v": "model.safetensors",
    "audio_codec.quantizer_out_proj.bias": "model.safetensors",
    "audio_codec.quantizer_out_proj.weight_g": "model.safetensors",
    "audio_codec.quantizer_out_proj.weight_v": "model.safetensors",
    "embed_anchors.embed.weight": "model.safetensors",
    "embed_anchors.gate": "model.safetensors",
    "embed_anchors.proj.weight": "model.safetensors",
    "memory_proj.bias": "model.safetensors",
    "memory_proj.weight": "model.safetensors",
    "proj.bias": "model.safetensors",
    "proj.weight": "model.safetensors",
    "transformer.final_layer_scale_shift_table": "model.safetensors",
    "transformer.layers.0.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.0.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.0.attention.wk.weight": "model.safetensors",
    "transformer.layers.0.attention.wo.weight": "model.safetensors",
    "transformer.layers.0.attention.wq.weight": "model.safetensors",
    "transformer.layers.0.attention.wv.weight": "model.safetensors",
    "transformer.layers.0.attention_norm.weight": "model.safetensors",
    "transformer.layers.0.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.0.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.0.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.0.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.0.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.0.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.0.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.0.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.0.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.0.ffn_norm.weight": "model.safetensors",
    "transformer.layers.0.scale_shift_table": "model.safetensors",
    "transformer.layers.1.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.1.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.1.attention.wk.weight": "model.safetensors",
    "transformer.layers.1.attention.wo.weight": "model.safetensors",
    "transformer.layers.1.attention.wq.weight": "model.safetensors",
    "transformer.layers.1.attention.wv.weight": "model.safetensors",
    "transformer.layers.1.attention_norm.weight": "model.safetensors",
    "transformer.layers.1.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.1.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.1.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.1.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.1.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.1.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.1.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.1.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.1.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.1.ffn_norm.weight": "model.safetensors",
    "transformer.layers.1.scale_shift_table": "model.safetensors",
    "transformer.layers.10.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.10.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.10.attention.wk.weight": "model.safetensors",
    "transformer.layers.10.attention.wo.weight": "model.safetensors",
    "transformer.layers.10.attention.wq.weight": "model.safetensors",
    "transformer.layers.10.attention.wv.weight": "model.safetensors",
    "transformer.layers.10.attention_norm.weight": "model.safetensors",
    "transformer.layers.10.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.10.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.10.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.10.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.10.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.10.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.10.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.10.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.10.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.10.ffn_norm.weight": "model.safetensors",
    "transformer.layers.10.scale_shift_table": "model.safetensors",
    "transformer.layers.11.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.11.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.11.attention.wk.weight": "model.safetensors",
    "transformer.layers.11.attention.wo.weight": "model.safetensors",
    "transformer.layers.11.attention.wq.weight": "model.safetensors",
    "transformer.layers.11.attention.wv.weight": "model.safetensors",
    "transformer.layers.11.attention_norm.weight": "model.safetensors",
    "transformer.layers.11.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.11.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.11.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.11.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.11.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.11.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.11.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.11.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.11.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.11.ffn_norm.weight": "model.safetensors",
    "transformer.layers.11.scale_shift_table": "model.safetensors",
    "transformer.layers.2.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.2.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.2.attention.wk.weight": "model.safetensors",
    "transformer.layers.2.attention.wo.weight": "model.safetensors",
    "transformer.layers.2.attention.wq.weight": "model.safetensors",
    "transformer.layers.2.attention.wv.weight": "model.safetensors",
    "transformer.layers.2.attention_norm.weight": "model.safetensors",
    "transformer.layers.2.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.2.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.2.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.2.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.2.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.2.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.2.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.2.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.2.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.2.ffn_norm.weight": "model.safetensors",
    "transformer.layers.2.scale_shift_table": "model.safetensors",
    "transformer.layers.3.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.3.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.3.attention.wk.weight": "model.safetensors",
    "transformer.layers.3.attention.wo.weight": "model.safetensors",
    "transformer.layers.3.attention.wq.weight": "model.safetensors",
    "transformer.layers.3.attention.wv.weight": "model.safetensors",
    "transformer.layers.3.attention_norm.weight": "model.safetensors",
    "transformer.layers.3.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.3.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.3.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.3.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.3.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.3.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.3.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.3.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.3.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.3.ffn_norm.weight": "model.safetensors",
    "transformer.layers.3.scale_shift_table": "model.safetensors",
    "transformer.layers.4.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.4.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.4.attention.wk.weight": "model.safetensors",
    "transformer.layers.4.attention.wo.weight": "model.safetensors",
    "transformer.layers.4.attention.wq.weight": "model.safetensors",
    "transformer.layers.4.attention.wv.weight": "model.safetensors",
    "transformer.layers.4.attention_norm.weight": "model.safetensors",
    "transformer.layers.4.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.4.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.4.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.4.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.4.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.4.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.4.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.4.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.4.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.4.ffn_norm.weight": "model.safetensors",
    "transformer.layers.4.scale_shift_table": "model.safetensors",
    "transformer.layers.5.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.5.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.5.attention.wk.weight": "model.safetensors",
    "transformer.layers.5.attention.wo.weight": "model.safetensors",
    "transformer.layers.5.attention.wq.weight": "model.safetensors",
    "transformer.layers.5.attention.wv.weight": "model.safetensors",
    "transformer.layers.5.attention_norm.weight": "model.safetensors",
    "transformer.layers.5.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.5.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.5.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.5.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.5.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.5.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.5.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.5.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.5.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.5.ffn_norm.weight": "model.safetensors",
    "transformer.layers.5.scale_shift_table": "model.safetensors",
    "transformer.layers.6.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.6.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.6.attention.wk.weight": "model.safetensors",
    "transformer.layers.6.attention.wo.weight": "model.safetensors",
    "transformer.layers.6.attention.wq.weight": "model.safetensors",
    "transformer.layers.6.attention.wv.weight": "model.safetensors",
    "transformer.layers.6.attention_norm.weight": "model.safetensors",
    "transformer.layers.6.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.6.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.6.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.6.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.6.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.6.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.6.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.6.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.6.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.6.ffn_norm.weight": "model.safetensors",
    "transformer.layers.6.scale_shift_table": "model.safetensors",
    "transformer.layers.7.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.7.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.7.attention.wk.weight": "model.safetensors",
    "transformer.layers.7.attention.wo.weight": "model.safetensors",
    "transformer.layers.7.attention.wq.weight": "model.safetensors",
    "transformer.layers.7.attention.wv.weight": "model.safetensors",
    "transformer.layers.7.attention_norm.weight": "model.safetensors",
    "transformer.layers.7.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.7.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.7.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.7.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.7.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.7.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.7.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.7.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.7.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.7.ffn_norm.weight": "model.safetensors",
    "transformer.layers.7.scale_shift_table": "model.safetensors",
    "transformer.layers.8.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.8.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.8.attention.wk.weight": "model.safetensors",
    "transformer.layers.8.attention.wo.weight": "model.safetensors",
    "transformer.layers.8.attention.wq.weight": "model.safetensors",
    "transformer.layers.8.attention.wv.weight": "model.safetensors",
    "transformer.layers.8.attention_norm.weight": "model.safetensors",
    "transformer.layers.8.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.8.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.8.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.8.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.8.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.8.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.8.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.8.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.8.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.8.ffn_norm.weight": "model.safetensors",
    "transformer.layers.8.scale_shift_table": "model.safetensors",
    "transformer.layers.9.attention.k_norm.weight": "model.safetensors",
    "transformer.layers.9.attention.q_norm.weight": "model.safetensors",
    "transformer.layers.9.attention.wk.weight": "model.safetensors",
    "transformer.layers.9.attention.wo.weight": "model.safetensors",
    "transformer.layers.9.attention.wq.weight": "model.safetensors",
    "transformer.layers.9.attention.wv.weight": "model.safetensors",
    "transformer.layers.9.attention_norm.weight": "model.safetensors",
    "transformer.layers.9.cross_attention.k_norm.weight": "model.safetensors",
    "transformer.layers.9.cross_attention.q_norm.weight": "model.safetensors",
    "transformer.layers.9.cross_attention.wk.weight": "model.safetensors",
    "transformer.layers.9.cross_attention.wo.weight": "model.safetensors",
    "transformer.layers.9.cross_attention.wq.weight": "model.safetensors",
    "transformer.layers.9.cross_attention.wv.weight": "model.safetensors",
    "transformer.layers.9.feed_forward.w1.weight": "model.safetensors",
    "transformer.layers.9.feed_forward.w2.weight": "model.safetensors",
    "transformer.layers.9.feed_forward.w3.weight": "model.safetensors",
    "transformer.layers.9.ffn_norm.weight": "model.safetensors",
    "transformer.layers.9.scale_shift_table": "model.safetensors",
    "transformer.norm.weight": "model.safetensors",
    "transformer.output.weight": "model.safetensors",
    "transformer.t_block.bias": "model.safetensors",
    "transformer.t_block.weight": "model.safetensors",
    "transformer.t_embedder.projection.w1.weight": "model.safetensors",
    "transformer.t_embedder.projection.w2.weight": "model.safetensors",
    "transformer.t_embedder.projection.w3.weight": "model.safetensors",
    "transformer.x_embedder.block.block1.groupnorm.bias": "model.safetensors",
    "transformer.x_embedder.block.block1.groupnorm.weight": "model.safetensors",
    "transformer.x_embedder.block.block1.project.bias": "model.safetensors",
    "transformer.x_embedder.block.block1.project.weight": "model.safetensors",
    "transformer.x_embedder.block.block2.groupnorm.bias": "model.safetensors",
    "transformer.x_embedder.block.block2.groupnorm.weight": "model.safetensors",
    "transformer.x_embedder.block.block2.project.bias": "model.safetensors",
    "transformer.x_embedder.block.block2.project.weight": "model.safetensors",
    "transformer.y_embedder.projection.w1.weight": "model.safetensors",
    "transformer.y_embedder.projection.w2.weight": "model.safetensors",
    "transformer.y_embedder.projection.w3.weight": "model.safetensors"
  }
}