{
"last_node_id": 31,
"last_link_id": 35,
"nodes": [
{
"id": 1,
"type": "CheckpointLoaderSimple",
"pos": [
395,
222
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
1
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"PVCStyleModelMovable_pony160.safetensors"
]
},
{
"id": 2,
"type": "CheckpointLoaderSimple",
"pos": [
391,
397
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
2
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"accretiondiscxl_v10.safetensors"
]
},
{
"id": 3,
"type": "ModelMergeBlocks",
"pos": [
823,
284
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 15,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 1
},
{
"name": "model2",
"type": "MODEL",
"link": 2
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
4
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.71,
0.67,
0.75
]
},
{
"id": 4,
"type": "ModelMergeBlocks",
"pos": [
1208,
284
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 16,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 3
},
{
"name": "model2",
"type": "MODEL",
"link": 4
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
5
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.58,
0.5,
0.6
]
},
{
"id": 6,
"type": "CheckpointLoaderSimple",
"pos": [
1544,
519
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 2,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
6
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"bambooShootMixMix_v10.safetensors"
]
},
{
"id": 7,
"type": "ModelMergeBlocks",
"pos": [
1613,
280
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 17,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 5
},
{
"name": "model2",
"type": "MODEL",
"link": 6
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
7
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.5,
0.4,
0.5
]
},
{
"id": 9,
"type": "CheckpointLoaderSimple",
"pos": [
1969,
544
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 3,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
8
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"cosmixv2WaifuToWife_v10.safetensors"
]
},
{
"id": 8,
"type": "ModelMergeBlocks",
"pos": [
2035,
281
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 18,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 7
},
{
"name": "model2",
"type": "MODEL",
"link": 8
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
9
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.44,
0.33,
0.42
]
},
{
"id": 11,
"type": "CheckpointLoaderSimple",
"pos": [
2410,
526
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 4,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
10
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"malaSmoothPonyxl_v20.safetensors"
]
},
{
"id": 10,
"type": "ModelMergeBlocks",
"pos": [
2423,
277
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 19,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 9
},
{
"name": "model2",
"type": "MODEL",
"link": 10
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
11
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.4,
0.25,
0.37
]
},
{
"id": 12,
"type": "CheckpointLoaderSimple",
"pos": [
2773,
534
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 5,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
12
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"meichidarkmixReload_meichidarkmixSensual.safetensors"
]
},
{
"id": 13,
"type": "ModelMergeBlocks",
"pos": [
2869,
267
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 20,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 11
},
{
"name": "model2",
"type": "MODEL",
"link": 12
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
14
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.37,
0.25,
0.33
]
},
{
"id": 14,
"type": "CheckpointLoaderSimple",
"pos": [
3243,
521
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 6,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
13
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"mistoonXLCopper_v20Fast.safetensors"
]
},
{
"id": 17,
"type": "CheckpointLoaderSimple",
"pos": [
3647,
522
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 7,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
15
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"mocasemix_v10.safetensors"
]
},
{
"id": 15,
"type": "ModelMergeBlocks",
"pos": [
3286,
267
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 21,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 13
},
{
"name": "model2",
"type": "MODEL",
"link": 14
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
16
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.35,
0.22,
0.3
]
},
{
"id": 16,
"type": "ModelMergeBlocks",
"pos": [
3673,
263
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 22,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 15
},
{
"name": "model2",
"type": "MODEL",
"link": 16
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
18
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.33,
0.2,
0.27
]
},
{
"id": 18,
"type": "CheckpointLoaderSimple",
"pos": [
4015,
518
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 8,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
17,
21
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"alphonseWhiteDatura_Pony.safetensors"
]
},
{
"id": 19,
"type": "ModelMergeBlocks",
"pos": [
4078,
260
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 23,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 17
},
{
"name": "model2",
"type": "MODEL",
"link": 18
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
22
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.31,
0.18,
0.25
]
},
{
"id": 20,
"type": "ModelMergeBlocks",
"pos": [
4479,
264
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 24,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 21
},
{
"name": "model2",
"type": "MODEL",
"link": 22
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
23
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.3,
0.16,
0.23
]
},
{
"id": 22,
"type": "CheckpointLoaderSimple",
"pos": [
4475,
522
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 9,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
24
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"pixivponyepitamix_v10.safetensors"
]
},
{
"id": 24,
"type": "CheckpointLoaderSimple",
"pos": [
4889,
500
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 10,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
25
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"pixivponyepitamix_v10_1.safetensors"
]
},
{
"id": 21,
"type": "ModelMergeBlocks",
"pos": [
4890,
281
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 25,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 24
},
{
"name": "model2",
"type": "MODEL",
"link": 23
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
26
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.28,
0.15,
0.21
]
},
{
"id": 23,
"type": "ModelMergeBlocks",
"pos": [
5265,
282
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 26,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 25
},
{
"name": "model2",
"type": "MODEL",
"link": 26
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
27
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.27,
0.14,
0.2
]
},
{
"id": 25,
"type": "CheckpointLoaderSimple",
"pos": [
5290,
485
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 11,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
28
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"thickCoatingStyle_pdxl10.safetensors"
]
},
{
"id": 26,
"type": "ModelMergeBlocks",
"pos": [
5680,
260
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 27,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 28
},
{
"name": "model2",
"type": "MODEL",
"link": 27
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
29
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.26,
0.13,
0.18
]
},
{
"id": 27,
"type": "CheckpointLoaderSimple",
"pos": [
5683,
456
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 12,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
30
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts."
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"vendoPonyRealistic_v13Lora.safetensors"
]
},
{
"id": 28,
"type": "ModelMergeBlocks",
"pos": [
6063,
249
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 28,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 30
},
{
"name": "model2",
"type": "MODEL",
"link": 29
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
31
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.25,
0.12,
0.17
]
},
{
"id": 31,
"type": "CheckpointSave",
"pos": [
6887,
258
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 30,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 33
},
{
"name": "clip",
"type": "CLIP",
"link": 34
},
{
"name": "vae",
"type": "VAE",
"link": 35
}
],
"properties": {
"Node name for S&R": "CheckpointSave"
},
"widgets_values": [
"checkpoints/ComfyUI"
]
},
{
"id": 29,
"type": "ModelMergeBlocks",
"pos": [
6425,
247
],
"size": {
"0": 315,
"1": 126
},
"flags": {},
"order": 29,
"mode": 0,
"inputs": [
{
"name": "model1",
"type": "MODEL",
"link": 32
},
{
"name": "model2",
"type": "MODEL",
"link": 31
}
],
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
33
],
"shape": 3,
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ModelMergeBlocks"
},
"widgets_values": [
0.25,
0.11,
0.16
]
},
{
"id": 30,
"type": "CheckpointLoaderSimple",
"pos": [
6048,
452
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 13,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
32
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": null,
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts.",
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": null,
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space."
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"whiteUnicorn_v30.safetensors"
]
},
{
"id": 5,
"type": "CheckpointLoaderSimple",
"pos": [
1035,
519
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 14,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
3
],
"shape": 3,
"tooltip": "The model used for denoising latents.",
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
34
],
"shape": 3,
"tooltip": "The CLIP model used for encoding text prompts.",
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [
35
],
"shape": 3,
"tooltip": "The VAE model used for encoding and decoding images to and from latent space.",
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"pixivponyepitamix_v10.safetensors"
]
}
],
"links": [
[
1,
1,
0,
3,
0,
"MODEL"
],
[
2,
2,
0,
3,
1,
"MODEL"
],
[
3,
5,
0,
4,
0,
"MODEL"
],
[
4,
3,
0,
4,
1,
"MODEL"
],
[
5,
4,
0,
7,
0,
"MODEL"
],
[
6,
6,
0,
7,
1,
"MODEL"
],
[
7,
7,
0,
8,
0,
"MODEL"
],
[
8,
9,
0,
8,
1,
"MODEL"
],
[
9,
8,
0,
10,
0,
"MODEL"
],
[
10,
11,
0,
10,
1,
"MODEL"
],
[
11,
10,
0,
13,
0,
"MODEL"
],
[
12,
12,
0,
13,
1,
"MODEL"
],
[
13,
14,
0,
15,
0,
"MODEL"
],
[
14,
13,
0,
15,
1,
"MODEL"
],
[
15,
17,
0,
16,
0,
"MODEL"
],
[
16,
15,
0,
16,
1,
"MODEL"
],
[
17,
18,
0,
19,
0,
"MODEL"
],
[
18,
16,
0,
19,
1,
"MODEL"
],
[
21,
18,
0,
20,
0,
"MODEL"
],
[
22,
19,
0,
20,
1,
"MODEL"
],
[
23,
20,
0,
21,
1,
"MODEL"
],
[
24,
22,
0,
21,
0,
"MODEL"
],
[
25,
24,
0,
23,
0,
"MODEL"
],
[
26,
21,
0,
23,
1,
"MODEL"
],
[
27,
23,
0,
26,
1,
"MODEL"
],
[
28,
25,
0,
26,
0,
"MODEL"
],
[
29,
26,
0,
28,
1,
"MODEL"
],
[
30,
27,
0,
28,
0,
"MODEL"
],
[
31,
28,
0,
29,
1,
"MODEL"
],
[
32,
30,
0,
29,
0,
"MODEL"
],
[
33,
29,
0,
31,
0,
"MODEL"
],
[
34,
5,
1,
31,
1,
"CLIP"
],
[
35,
5,
2,
31,
2,
"VAE"
]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 1,
"offset": [
-5635.498001420395,
96.06443919089634
]
}
},
"version": 0.4
}
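
For reference, the workflow above folds thirteen checkpoints together pairwise, one ModelMergeBlocks node at a time, with the three widgets_values of each node giving per-section blend ratios for the UNet's input, middle, and output blocks. Below is a minimal Python sketch (not ComfyUI code) of the arithmetic such a node performs. It assumes, as ComfyUI's built-in ModelMergeBlocks appears to do, that each ratio is the fraction kept from model1, with (1 - ratio) taken from model2; the key-prefix matching here is deliberately simplified and hypothetical, since real SDXL parameter names carry a longer "diffusion_model." prefix and include extra groups such as time_embed.

# Minimal sketch of a per-block linear merge, assuming the widget values
# [input, middle, out] are the share kept from model1 per UNet section.
# The prefix matching is simplified/hypothetical, not ComfyUI's exact logic.
def merge_blocks(model1, model2, ratios):
    """Linearly interpolate two state dicts section by section.

    ratios: {"input": r_in, "middle": r_mid, "out": r_out}
    """
    merged = {}
    for key, w1 in model1.items():
        if key.startswith("middle_block."):
            r = ratios["middle"]
        elif key.startswith("output_blocks.") or key.startswith("out."):
            r = ratios["out"]
        else:
            # Everything else (input blocks, embeddings) uses the input
            # ratio in this sketch -- an assumption, not confirmed behavior.
            r = ratios["input"]
        merged[key] = r * w1 + (1.0 - r) * model2[key]
    return merged

# Toy usage with scalar "weights", matching node id 3 (ratios 0.71/0.67/0.75):
a = {"input_blocks.0.weight": 1.0, "middle_block.1.weight": 1.0, "out.2.weight": 1.0}
b = {k: 0.0 for k in a}
print(merge_blocks(a, b, {"input": 0.71, "middle": 0.67, "out": 0.75}))
# -> {'input_blocks.0.weight': 0.71, 'middle_block.1.weight': 0.67, 'out.2.weight': 0.75}

Under that reading, the chain's ratios step down from 0.71/0.67/0.75 at the first merge to 0.25/0.11/0.16 at the last, so each newly introduced checkpoint blends against an accumulated merge that already carries the earlier models; the CLIP and VAE saved by the final CheckpointSave node come untouched from node id 5 (pixivponyepitamix_v10.safetensors), not from the merge.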