rockets6

SDXL test vs Cascade

Feb 13th, 2024
{
  "last_node_id": 48,
  "last_link_id": 44,
  "nodes": [
    {
      "id": 15,
      "type": "CLIPTextEncode",
      "pos": [1139.1103087741085, -121.78948886648053],
      "size": {"0": 210, "1": 54},
      "flags": {},
      "order": 19,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 19},
        {"name": "text", "type": "STRING", "link": 21, "widget": {"name": "text"}, "slot_index": 1}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [23], "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["a 35 year old Tongan woman standing in a food court at a mall"],
      "color": "#232",
      "bgcolor": "#353"
    },
    {
      "id": 16,
      "type": "CLIPTextEncode",
      "pos": [1139.1103087741085, -31.789488866480543],
      "size": {"0": 210, "1": 54},
      "flags": {},
      "order": 17,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 20},
        {"name": "text", "type": "STRING", "link": 22, "widget": {"name": "text"}, "slot_index": 1}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [24], "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": [""],
      "color": "#322",
      "bgcolor": "#533"
    },
    {
      "id": 36,
      "type": "Note",
      "pos": [-74, -470],
      "size": {"0": 315.70074462890625, "1": 147.9551239013672},
      "flags": {},
      "order": 0,
      "mode": 0,
      "title": "Note - Load Checkpoint BASE",
      "properties": {"text": ""},
      "widgets_values": ["This is a checkpoint model loader.\n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Base SDXL model\n - This node is also used for SD1.5 and SD2.x models\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRAs and all their derivatives."],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 37,
      "type": "Note",
      "pos": [610, -460],
      "size": {"0": 330, "1": 140},
      "flags": {},
      "order": 1,
      "mode": 0,
      "title": "Note - Load Checkpoint REFINER",
      "properties": {"text": ""},
      "widgets_values": ["This is a checkpoint model loader.\n - This is set up automatically with the optimal settings for whatever SD model version you choose to use.\n - In this example, it is for the Refiner SDXL model\n\nNOTE: When loading in another person's workflow, be sure to manually choose your own *local* model. This also applies to LoRAs and all their derivatives."],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 38,
      "type": "Note",
      "pos": [126.74066078186036, 534.1799162292467],
      "size": {"0": 284.3257141113281, "1": 123.88604736328125},
      "flags": {},
      "order": 2,
      "mode": 0,
      "title": "Note - Text Prompts",
      "properties": {"text": ""},
      "widgets_values": ["These nodes are where you include the text for:\n - what you want in the picture (Positive Prompt, Green)\n - what you don't want in the picture (Negative Prompt, Red)\n\nThis node type is called a \"PrimitiveNode\" if you are searching for the node type."],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 40,
      "type": "Note",
      "pos": [1325, 234],
      "size": {"0": 451.5049743652344, "1": 424.4164123535156},
      "flags": {},
      "order": 3,
      "mode": 0,
      "title": "Note - KSampler ADVANCED General Information",
      "properties": {"text": ""},
      "widgets_values": ["Here are the settings that SHOULD stay in place if you want this workflow to work correctly:\n - add_noise: enable = This adds random noise into the picture so the model can denoise it\n\n - return_with_leftover_noise: enable = This sends the latent image data and all its leftover noise to the next KSampler node.\n\nThe settings to pay attention to:\n - control_after_generate = generates a new random seed after each workflow job is completed.\n - steps = This is the number of iterations you would like to run the positive and negative CLIP prompts through. Each step will add (positive) or remove (negative) detail based on what Stable Diffusion \"thinks\" should be there according to the model's training.\n - cfg = This is how much you want SDXL to adhere to the prompt. Lower CFG gives you more creative but often blurrier results. Higher CFG (recommended max 10) gives you stricter results according to the CLIP prompt. If the CFG value is too high, it can also result in \"burn-in\", where the edges of the picture become even stronger, often highlighting details in unnatural ways.\n - sampler_name = This is the sampler type. Unfortunately, some samplers and schedulers give better results with fewer steps, while others work better with more steps. This will require experimentation on your part!\n - scheduler = The algorithm/method used to choose the timesteps to denoise the picture.\n - start_at_step = This is the step number at which the KSampler starts its process of de-noising the picture, or \"removing the random noise to reveal the picture within\". The first KSampler usually starts at step 0. Starting at step 0 is the same as setting denoise to 1.0 in the regular KSampler node.\n - end_at_step = This is the step number at which the KSampler stops its process of de-noising the picture. If there is any leftover noise and return_with_leftover_noise is enabled, it will pass the leftover noise on to the next KSampler (assuming there is another one)."],
      "color": "#223",
      "bgcolor": "#335"
    },
    {
      "id": 17,
      "type": "VAEDecode",
      "pos": [2220.7710413363047, 129.6025938601699],
      "size": {"0": 200, "1": 50},
      "flags": {},
      "order": 22,
      "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 25},
        {"name": "vae", "type": "VAE", "link": 34}
      ],
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [28], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "VAEDecode"},
      "color": "#332922",
      "bgcolor": "#593930"
    },
    {
      "id": 41,
      "type": "Note",
      "pos": [2160.7710413363047, 229.60259386016995],
      "size": {"0": 320, "1": 120},
      "flags": {},
      "order": 4,
      "mode": 0,
      "title": "Note - VAE Decoder",
      "properties": {"text": ""},
      "widgets_values": ["This node takes the latent data from the KSampler and, using the VAE, decodes it into visible image data.\n\nVAE = Latent --> Visible\n\nThis can then be sent to the Save Image node to be saved as a PNG."],
      "color": "#332922",
      "bgcolor": "#593930"
    },
    {
      "id": 42,
      "type": "Note",
      "pos": [564.5041024540307, 801.1200708259006],
      "size": {"0": 260, "1": 210},
      "flags": {},
      "order": 5,
      "mode": 0,
      "title": "Note - Empty Latent Image",
      "properties": {"text": ""},
      "widgets_values": ["This node sets the image's resolution in Width and Height.\n\nNOTE: For SDXL, it is recommended to use trained values listed below:\n - 1024 x 1024\n - 1152 x 896\n - 896 x 1152\n - 1216 x 832\n - 832 x 1216\n - 1344 x 768\n - 768 x 1344\n - 1536 x 640\n - 640 x 1536"],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 43,
      "type": "Note",
      "pos": [1125, 70],
      "size": {"0": 240, "1": 80},
      "flags": {},
      "order": 6,
      "mode": 0,
      "title": "Note - CLIP Encode (REFINER)",
      "properties": {"text": ""},
      "widgets_values": ["These nodes receive the text from the prompt and use the optimal CLIP settings for the specified checkpoint model (in this case: SDXL Refiner)"],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 6,
      "type": "CLIPTextEncode",
      "pos": [599.4967909953033, 269.4780241240285],
      "size": {"0": 210, "1": 54},
      "flags": {},
      "order": 18,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 3},
        {"name": "text", "type": "STRING", "link": 16, "widget": {"name": "text"}, "slot_index": 1}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [11], "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["a 35 year old Tongan woman standing in a food court at a mall"],
      "color": "#232",
      "bgcolor": "#353"
    },
    {
      "id": 7,
      "type": "CLIPTextEncode",
      "pos": [599.4967909953033, 359.4780241240287],
      "size": {"0": 210, "1": 54},
      "flags": {},
      "order": 16,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 5},
        {"name": "text", "type": "STRING", "link": 18, "widget": {"name": "text"}, "slot_index": 1}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [12], "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": [""],
      "color": "#322",
      "bgcolor": "#533"
    },
    {
      "id": 39,
      "type": "Note",
      "pos": [599.4967909953033, 449.4780241240287],
      "size": {"0": 210, "1": 80},
      "flags": {},
      "order": 7,
      "mode": 0,
      "title": "Note - CLIP Encode (BASE)",
      "properties": {"text": ""},
      "widgets_values": ["These nodes receive the text from the prompt and use the optimal CLIP settings for the specified checkpoint model (in this case: SDXL Base)"],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 47,
      "type": "PrimitiveNode",
      "pos": [1037.5286840013239, 881.6113881513106],
      "size": {"0": 210, "1": 82},
      "flags": {},
      "order": 8,
      "mode": 0,
      "outputs": [
        {"name": "INT", "type": "INT", "links": [43, 44], "widget": {"name": "end_at_step"}, "slot_index": 0}
      ],
      "title": "end_at_step",
      "properties": {"Run widget replace on values": false},
      "widgets_values": [20, "fixed"],
      "color": "#432",
      "bgcolor": "#653"
    },
    {
      "id": 48,
      "type": "Note",
      "pos": [1036, 1018],
      "size": {"0": 213.90769958496094, "1": 110.17156982421875},
      "flags": {},
      "order": 9,
      "mode": 0,
      "properties": {"text": ""},
      "widgets_values": ["These can be used to control the total sampling steps and the step at which the sampling switches to the refiner."],
      "color": "#432",
      "bgcolor": "#653"
    },
    {
      "id": 4,
      "type": "CheckpointLoaderSimple",
      "pos": [-90, -620],
      "size": {"0": 350, "1": 100},
      "flags": {},
      "order": 10,
      "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [10], "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [3, 5], "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [], "slot_index": 2}
      ],
      "title": "Load Checkpoint - BASE",
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["sdXL_v10VAEFix.safetensors"],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 12,
      "type": "CheckpointLoaderSimple",
      "pos": [600, -611],
      "size": {"0": 350, "1": 100},
      "flags": {},
      "order": 11,
      "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [14], "shape": 3, "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [19, 20], "shape": 3, "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [34], "shape": 3, "slot_index": 2}
      ],
      "title": "Load Checkpoint - REFINER",
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["sdXL_v10RefinerVAEFix.safetensors"],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 14,
      "type": "PrimitiveNode",
      "pos": [117.74066078186034, 335.1799162292478],
      "size": {"0": 300, "1": 160},
      "flags": {},
      "order": 12,
      "mode": 0,
      "outputs": [
        {"name": "STRING", "type": "STRING", "links": [18, 22], "widget": {"name": "text"}, "slot_index": 0}
      ],
      "title": "Negative Prompt (Text)",
      "properties": {"Run widget replace on values": false},
      "widgets_values": [""],
      "color": "#322",
      "bgcolor": "#533"
    },
    {
      "id": 13,
      "type": "PrimitiveNode",
      "pos": [117.74066078186034, 135.179916229248],
      "size": {"0": 300, "1": 160},
      "flags": {},
      "order": 13,
      "mode": 0,
      "outputs": [
        {"name": "STRING", "type": "STRING", "links": [16, 21], "widget": {"name": "text"}, "slot_index": 0}
      ],
      "title": "Positive Prompt (Text)",
      "properties": {"Run widget replace on values": false},
      "widgets_values": ["a 35 year old Tongan woman standing in a food court at a mall"],
      "color": "#232",
      "bgcolor": "#353"
    },
    {
      "id": 5,
      "type": "EmptyLatentImage",
      "pos": [544.5041024540301, 651.1200708259006],
      "size": {"0": 300, "1": 110},
      "flags": {},
      "order": 14,
      "mode": 0,
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [27], "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "EmptyLatentImage"},
      "widgets_values": [1024, 1152, 1],
      "color": "#323",
      "bgcolor": "#535"
    },
    {
      "id": 19,
      "type": "SaveImage",
      "pos": [2600, 130],
      "size": {"0": 565.774658203125, "1": 596.3757934570312},
      "flags": {},
      "order": 23,
      "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 28}
      ],
      "properties": {},
      "widgets_values": ["cascade compare/img_"],
      "color": "#222",
      "bgcolor": "#000"
    },
    {
      "id": 10,
      "type": "KSamplerAdvanced",
      "pos": [1000, 230],
      "size": {"0": 300, "1": 334},
      "flags": {},
      "order": 20,
      "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 10},
        {"name": "positive", "type": "CONDITIONING", "link": 11},
        {"name": "negative", "type": "CONDITIONING", "link": 12},
        {"name": "latent_image", "type": "LATENT", "link": 27},
        {"name": "steps", "type": "INT", "link": 41, "widget": {"name": "steps"}, "slot_index": 4},
        {"name": "end_at_step", "type": "INT", "link": 43, "widget": {"name": "end_at_step"}, "slot_index": 5}
      ],
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [13], "shape": 3, "slot_index": 0}
      ],
      "title": "KSampler (Advanced) - BASE",
      "properties": {"Node name for S&R": "KSamplerAdvanced"},
      "widgets_values": ["enable", 721897303308196, "randomize", 30, 4, "dpmpp_sde", "karras", 0, 20, "enable"],
      "color": "#223",
      "bgcolor": "#335"
    },
    {
      "id": 45,
      "type": "PrimitiveNode",
      "pos": [1039.5286840013239, 734.6113881513106],
      "size": {"0": 210, "1": 82},
      "flags": {},
      "order": 15,
      "mode": 0,
      "outputs": [
        {"name": "INT", "type": "INT", "links": [38, 41], "widget": {"name": "steps"}}
      ],
      "title": "steps",
      "properties": {"Run widget replace on values": false},
      "widgets_values": [30, "fixed"],
      "color": "#432",
      "bgcolor": "#653"
    },
    {
      "id": 11,
      "type": "KSamplerAdvanced",
      "pos": [1800, 130],
      "size": {"0": 300, "1": 340},
      "flags": {},
      "order": 21,
      "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 14, "slot_index": 0},
        {"name": "positive", "type": "CONDITIONING", "link": 23},
        {"name": "negative", "type": "CONDITIONING", "link": 24},
        {"name": "latent_image", "type": "LATENT", "link": 13},
        {"name": "steps", "type": "INT", "link": 38, "widget": {"name": "steps"}, "slot_index": 4},
        {"name": "start_at_step", "type": "INT", "link": 44, "widget": {"name": "start_at_step"}}
      ],
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [25], "shape": 3, "slot_index": 0}
      ],
      "title": "KSampler (Advanced) - REFINER",
      "properties": {"Node name for S&R": "KSamplerAdvanced"},
      "widgets_values": ["disable", 0, "fixed", 30, 7, "dpmpp_sde", "karras", 20, 10000, "disable"],
      "color": "#223",
      "bgcolor": "#335"
    }
  ],
  "links": [
    [3, 4, 1, 6, 0, "CLIP"],
    [5, 4, 1, 7, 0, "CLIP"],
    [10, 4, 0, 10, 0, "MODEL"],
    [11, 6, 0, 10, 1, "CONDITIONING"],
    [12, 7, 0, 10, 2, "CONDITIONING"],
    [13, 10, 0, 11, 3, "LATENT"],
    [14, 12, 0, 11, 0, "MODEL"],
    [16, 13, 0, 6, 1, "STRING"],
    [18, 14, 0, 7, 1, "STRING"],
    [19, 12, 1, 15, 0, "CLIP"],
    [20, 12, 1, 16, 0, "CLIP"],
    [21, 13, 0, 15, 1, "STRING"],
    [22, 14, 0, 16, 1, "STRING"],
    [23, 15, 0, 11, 1, "CONDITIONING"],
    [24, 16, 0, 11, 2, "CONDITIONING"],
    [25, 11, 0, 17, 0, "LATENT"],
    [27, 5, 0, 10, 3, "LATENT"],
    [28, 17, 0, 19, 0, "IMAGE"],
    [34, 12, 2, 17, 1, "VAE"],
    [38, 45, 0, 11, 4, "INT"],
    [41, 45, 0, 10, 4, "INT"],
    [43, 47, 0, 10, 5, "INT"],
    [44, 47, 0, 11, 5, "INT"]
  ],
  "groups": [
    {"title": "Base Prompt", "bounding": [579, 189, 252, 361], "color": "#3f789e", "font_size": 24, "locked": false},
    {"title": "Refiner Prompt", "bounding": [1102, -201, 282, 372], "color": "#3f789e", "font_size": 24, "locked": false},
    {"title": "Text Prompts", "bounding": [98, 52, 339, 622], "color": "#3f789e", "font_size": 24, "locked": false},
    {"title": "Load in BASE SDXL Model", "bounding": [-100, -700, 369, 399], "color": "#a1309b", "font_size": 24, "locked": false},
    {"title": "Load in REFINER SDXL Model", "bounding": [581, -700, 391, 400], "color": "#a1309b", "font_size": 24, "locked": false},
    {"title": "Empty Latent Image", "bounding": [524, 577, 339, 443], "color": "#a1309b", "font_size": 24, "locked": false},
    {"title": "VAE Decoder", "bounding": [2142, 51, 360, 350], "color": "#b06634", "font_size": 24, "locked": false},
    {"title": "Step Control", "bounding": [1005, 623, 284, 524], "color": "#3f789e", "font_size": 24, "locked": false}
  ],
  "config": {},
  "extra": {},
  "version": 0.4
}
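
For reference, a minimal Python sketch for reading the base/refiner step handoff (described in the KSampler and Step Control notes) back out of this workflow file. The filename sdxl_base_refiner.json is hypothetical (save the JSON above under any name), and the widget-value order for KSamplerAdvanced is assumed from the note text and the values stored in this file:

import json

# Hypothetical path: save the workflow JSON above to this file first.
WORKFLOW_PATH = "sdxl_base_refiner.json"

# Assumed widget order for KSamplerAdvanced, matching the widgets_values above.
KSAMPLER_FIELDS = [
    "add_noise", "noise_seed", "control_after_generate", "steps", "cfg",
    "sampler_name", "scheduler", "start_at_step", "end_at_step",
    "return_with_leftover_noise",
]

with open(WORKFLOW_PATH, encoding="utf-8") as f:
    workflow = json.load(f)

for node in workflow["nodes"]:
    title = node.get("title", node["type"])
    if node["type"] == "CheckpointLoaderSimple":
        # Which local checkpoint each loader points at.
        print(f"{title}: {node['widgets_values'][0]}")
    elif node["type"] == "KSamplerAdvanced":
        # Map the stored widget values to named sampler settings.
        s = dict(zip(KSAMPLER_FIELDS, node["widgets_values"]))
        print(f"{title}: steps={s['steps']} cfg={s['cfg']} "
              f"start_at_step={s['start_at_step']} end_at_step={s['end_at_step']}")

With this file it should print that the BASE sampler denoises steps 0 to 20 and the REFINER sampler takes over from step 20, matching the end_at_step/start_at_step primitives in the Step Control group.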