# sd_comfy_api_v2.py
  1. #This is an example that uses the websockets api to know when a prompt execution is done
  2. #Once the prompt execution is done it downloads the images using the /history endpoint
  3. import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
  4. import uuid
  5. import json
  6. import urllib.request
  7. import urllib.parse
  8. from PIL import Image
  9. import io
  10. import random
  11. import sys
  12. import base64
  13. server_address = "127.0.0.1:8188"
  14. client_id = str(uuid.uuid4())
  15. def load_debug_ai_scene_info():
  16. #open ai_scene_info.json
  17. with open("D:/Git/ap-canvas-creation-module/04_stable_diffusion/ai_scene_info.json", "r") as f:
  18. ai_scene_info = json.load(f)
  19. return ai_scene_info
  20. def convert_base64_string_to_object(base64_string):
  21. bytes = base64.b64decode(base64_string)
  22. string = bytes.decode("ascii")
  23. return json.loads(string)
  24. def set_filename(json_obj, title, new_prefix):
  25. for key, value in json_obj.items():
  26. if isinstance(value, dict):
  27. if value.get("_meta", {}).get("title") == title:
  28. if "inputs" in value and "filename_prefix" in value["inputs"]:
  29. value["inputs"]["filename_prefix"] = new_prefix
  30. return new_prefix
  31. else:
  32. result = set_filename(value, title, new_prefix)
  33. if result:
  34. return result
  35. return None
  36. def find_node(json_obj, title):
  37. for key, value in json_obj.items():
  38. if isinstance(value, dict):
  39. if value.get("_meta", {}).get("title") == title:
  40. return value
  41. else:
  42. result = find_node(value, title)
  43. if result:
  44. return result
  45. return None
  46. def queue_prompt(prompt):
  47. p = {"prompt": prompt, "client_id": client_id}
  48. data = json.dumps(p).encode('utf-8')
  49. req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
  50. return json.loads(urllib.request.urlopen(req).read())
  51. def get_prompt(ai_scene_info):
  52. with open(
  53. "D://Git//ap-canvas-creation-module//04_stable_diffusion//workflows//canvas_3d_to_img_standard_V1.json",
  54. "r",
  55. ) as f:
  56. prompt_text_json = f.read()
  57. prompt = json.loads(prompt_text_json)
  58. #set the text prompt for our positive CLIPTextEncode
  59. positive_text = ai_scene_info["ai_scene"]["prompt"]["positive_prompt"]
  60. negative_text = ai_scene_info["ai_scene"]["prompt"]["negative_prompt"]
  61. image_path = "D://Git//ap-canvas-creation-module//03_blender//sd_blender//sample_scene//Renders//15a314a1-8ba1-4e0e-ad0c-f605b06f89f8//"
  62. image_base_path = image_path + "base0001.jpg"
  63. image_alpha_products_path = image_path + "alpha_products0001.jpg"
  64. # image_depth_path = image_path + "depth0001.png"
  65. prompt = json.loads(prompt_text_json)
  66. file_name = set_filename(prompt, "Save Image", "{project_id}/basic_api_example".format(project_id=ai_scene_info["project_id"]))
  67. ksampler_main = find_node(prompt, "KSampler")
  68. ksampler_main["inputs"]["noise_seed"] = random.randint(0, 1000000)
  69. ksampler_main = find_node(prompt, "KSampler")
  70. ksampler_main["inputs"]["steps"] = ai_scene_info["ai_scene"]["settings"]["steps"]
  71. ksampler_main["inputs"]["cfg"] = ai_scene_info["ai_scene"]["settings"]["cfg"]
  72. prompt_positive = find_node(prompt, "positive_CLIPTextEncodeSDXL")
  73. prompt_positive["inputs"]["text_g"] = positive_text
  74. prompt_positive["inputs"]["text_l"] = positive_text
  75. prompt_negative = find_node(prompt, "negative_CLIPTextEncodeSDXL")
  76. prompt_negative["inputs"]["text_g"] = negative_text
  77. prompt_negative["inputs"]["text_l"] = negative_text
  78. image_base = find_node(prompt, "image_base")
  79. image_base["inputs"]["image"] = image_base_path
  80. image_base = find_node(prompt, "image_product_mask")
  81. image_base["inputs"]["image"] = image_alpha_products_path
  82. image_base = find_node(prompt, "image_depth")
  83. # image_base["inputs"]["image"] = image_depth_path
  84. return prompt
  85. def get_image(filename, subfolder, folder_type):
  86. data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
  87. url_values = urllib.parse.urlencode(data)
  88. with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
  89. return response.read()
  90. def get_history(prompt_id):
  91. with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
  92. return json.loads(response.read())
  93. def get_images(ws, prompt):
  94. prompt_id = queue_prompt(prompt)['prompt_id']
  95. output_images = {}
  96. while True:
  97. out = ws.recv()
  98. if isinstance(out, str):
  99. message = json.loads(out)
  100. if message['type'] == 'executing':
  101. data = message['data']
  102. if data['node'] is None and data['prompt_id'] == prompt_id:
  103. break #Execution is done
  104. else:
  105. continue #previews are binary data
  106. history = get_history(prompt_id)[prompt_id]
  107. for node_id in history['outputs']:
  108. node_output = history['outputs'][node_id]
  109. images_output = []
  110. if 'images' in node_output:
  111. for image in node_output['images']:
  112. image_data = get_image(image['filename'], image['subfolder'], image['type'])
  113. images_output.append(image_data)
  114. output_images[node_id] = images_output
  115. return output_images
  116. def main():
  117. argv = sys.argv
  118. try:
  119. argv = argv[argv.index("--") + 1 :]
  120. ai_scene_info = convert_base64_string_to_object(argv[0])
  121. print("loading scene data", ai_scene_info)
  122. except Exception as e:
  123. print("Error:", e)
  124. ai_scene_info = load_debug_ai_scene_info()
  125. prompt = get_prompt(ai_scene_info)
  126. ws = websocket.WebSocket()
  127. ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
  128. images = get_images(ws, prompt)
  129. prompt_id = queue_prompt(prompt)['prompt_id']
  130. #Commented out code to display the output images:
  131. # for node_id in images:
  132. # for image_data in images[node_id]:
  133. # image = Image.open(io.BytesIO(image_data))
  134. # image.show()
  135. if __name__ == "__main__":
  136. main()