浏览代码

try to fix rotation

Sergiu 8 月之前
父节点
当前提交
7ef3fd0303

+ 6 - 7
03_blender/prepare_folder_for export.py

@@ -2,7 +2,7 @@ import os
 import shutil
 
 
-def copy_and_rename_folder(source_folder, target_folder, new_folder_name):
+def copy_and_rename_folder(source_folder, target_folder, new_folder_name, content_id):
     # Copy the source folder to the target folder
     copied_folder_path = os.path.join(target_folder, new_folder_name)
     shutil.copytree(source_folder, copied_folder_path)
@@ -21,7 +21,7 @@ def copy_and_rename_folder(source_folder, target_folder, new_folder_name):
 
     # Construct the new .blend file name
     new_blend_file_name = (
-        f"{brand}_{asset_name}_{year}_ALL_AOVA_MAT_{asset_number}.blend"
+        f"{brand}_{asset_name}_{year}_{content_id}_{asset_number}.blend"
     )
 
     # Rename the .blend file inside the copied folder
@@ -41,9 +41,8 @@ def copy_and_rename_folder(source_folder, target_folder, new_folder_name):
 source_folder = (
     r"Z:/01_Production_AP/01_Library/03_Elements/_Brand_AssetName_Year_AssetNumber"
 )
-target_folder = r"C:/Target/Folder"
-new_folder_name = "Brand_AssetName_Year_AssetNumber"
+target_folder = r"Z:/01_Production_AP/01_Library/03_Elements/AP/Products"
+new_folder_name = "AP_DRLC_2024_0001"
+content_id = "ALL_DT_MAT"
 
-copy_and_rename_folder(source_folder, target_folder, new_folder_name)
-
-copy_and_rename_folder(source_folder, target_folder, new_folder_name)
+copy_and_rename_folder(source_folder, target_folder, new_folder_name, content_id)

+ 192 - 57
03_blender/sd_blender/__init__.py

@@ -19,9 +19,12 @@ else:
     import json
     from pathlib import Path
     from dataclasses import dataclass
-    from mathutils import Euler
+    from mathutils import Euler, Vector, Quaternion, Matrix
     import math
     import os
+    import base64
+    import numpy as np
+    # from pyquaternion import Quaternion  # removed: shadowed mathutils.Quaternion (imported above, needed by convert_quat/local_rotation) and pyquaternion is not bundled with Blender
 
     # from . import zs_renderscene as zsrs  # noqa
 
@@ -37,6 +40,38 @@ bl_info = {
 }
 
 
+# -------------------------------------------------------------------
+# Convert Data Functions
+# -------------------------------------------------------------------
+def convert_loc(x):
+    return Vector([x[0], -x[2], x[1]])
+
+
+def convert_quat(q):
+    return Quaternion([q[3], q[0], -q[2], q[1]])
+
+
+def convert_scale(s):
+    return Vector([s[0], s[2], s[1]])
+
+
+def local_rotation(obj, rotation_before, rot):
+    """Appends a local rotation to vnode's world transform:
+    (new world transform) = (old world transform) @ (rot)
+    without changing the world transform of vnode's children.
+
+    For correctness, rot must be a signed permutation of the axes
+    (eg. (X Y Z)->(X -Z Y)) OR vnode's scale must always be uniform.
+    """
+    rotation_before = Quaternion((1, 0, 0, 0))  # FIXME: discards the `rotation_before` argument passed in
+    obj.rotation_before @= rot  # FIXME: Blender Object has no `rotation_before` attribute — this will raise AttributeError; looks like unfinished glTF-importer-derived code
+
+    # # Append the inverse rotation after children's TRS to cancel it out.
+    # rot_inv = rot.conjugated()
+    # for child in gltf.vnodes[vnode_id].children:
+    #     gltf.vnodes[child].rotation_after = rot_inv @ gltf.vnodes[child].rotation_after
+
+
 # -------------------------------------------------------------------
 # Functions
 # -------------------------------------------------------------------
@@ -62,10 +97,21 @@ basic_shapes_asset = AssetData(
 )
 
 
+def convert_base64_string_to_object(base64_string):
+    bytes = base64.b64decode(base64_string)
+    string = bytes.decode("ascii")
+
+    return json.loads(string)
+    # (unreachable `return string` removed — the line above always returns first)
+
+
 def load_scene():
+
     print("Loading Scene")
     # load scene data
+
     scene_data = load_scene_data()
+    print(scene_data)
     # create parent collections
     create_parent_collections(product_asset.collection_name)
     create_parent_collections(element_asset.collection_name)
@@ -101,18 +147,41 @@ def load_scene():
     set_cryptomatte_objects("01_Products", "mask_product")
 
 
+def invert_id_name(json_data):
+    for obj in json_data["scene"]["objects"]:
+        obj["name"], obj["id"] = obj["id"], obj["name"]
+    return json_data
+
+
 def load_scene_data():
-    print("Loading Scene Data")
+    # print("Loading Scene Data")
+    # # load scene data
+
+    # # to be replaced with actual data
+    # # open scene_info.json
+    # script_path = Path(__file__).resolve()
+    # scene_data_path = script_path.parent / "sample_scene" / "scene_info.json"
+    # with scene_data_path.open() as file:
+    #     scene_data = json.load(file)
+    #     print(scene_data)
+    # return scene_data
+
     # load scene data
+    print("Loading Scene Data")
+    if bpy.context.scene.load_local_DB:
+        loaded_scene_data = bpy.context.scene.config_string
+        # check if loaded_scene_data is base64 encoded
+        if loaded_scene_data.startswith("ey"):
+            scene_data = convert_base64_string_to_object(loaded_scene_data)
+        else:
+            scene_data = json.loads(loaded_scene_data)
 
-    # to be replaced with actual data
-    # open scene_info.json
-    script_path = Path(__file__).resolve()
-    scene_data_path = script_path.parent / "sample_scene" / "scene_info.json"
-    with scene_data_path.open() as file:
-        scene_data = json.load(file)
-        print(scene_data)
-    return scene_data
+    else:
+        scene_data = json.loads(bpy.context.scene.shot_info_ai)
+
+    invert_scene_data = invert_id_name(scene_data)
+
+    return invert_scene_data
 
 
 def load_objects_data(scene_data, object_type: str):
@@ -123,6 +192,8 @@ def load_objects_data(scene_data, object_type: str):
         if object["group_type"] == object_type:
             # get additional object data by id and combine with object data
             object_data = get_object_data_by_id(object["id"])
+            # temporary fix
+            # object_data = get_object_data_by_id(object["name"])
             object.update(object_data)
             objects_data.append(object)
     return objects_data
@@ -231,11 +302,23 @@ def append_active_layers(newCollectionName, product_info, asset_data: AssetData)
 
             # need to redo this in the future
             if "Animation_Target" in collection.name:
-                # print object name from collection
 
-                collection.objects[0].location = product_info["properties"][
+                # set the x location
+                collection.objects[0].location.x = product_info["properties"][
+                    "transform"
+                ]["position"][0]
+                # set the y location
+                collection.objects[0].location.y = -product_info["properties"][
                     "transform"
-                ]["position"]
+                ]["position"][2]
+                # set the z location
+                collection.objects[0].location.z = product_info["properties"][
+                    "transform"
+                ]["position"][1]
+
+                # collection.objects[0].location = product_info["properties"][
+                #     "transform"
+                # ]["position"]
 
                 # collection.objects[0].rotation_euler = product_info["properties"][
                 #     "transform"
@@ -244,9 +327,15 @@ def append_active_layers(newCollectionName, product_info, asset_data: AssetData)
                 rotation_in_degrees = product_info["properties"]["transform"][
                     "rotation"
                 ]
-                rotation_in_radians = [math.radians(deg) for deg in rotation_in_degrees]
 
-                collection.objects[0].rotation_euler = rotation_in_radians
+                rotation_in_degrees[0] = rotation_in_degrees[0] + 90
+
+                # set object rotation in euler from radians
+                collection.objects[0].rotation_euler = (
+                    math.radians(rotation_in_degrees[0]),
+                    math.radians(rotation_in_degrees[2]),
+                    math.radians(rotation_in_degrees[1]),
+                )
 
                 collection.objects[0].scale = product_info["properties"]["transform"][
                     "scale"
@@ -334,50 +423,74 @@ def create_cameras(scene_data):
     else:
         return
 
-    for camera_data in scene_data["scene"]["cameras"]:
-        # Create a new camera object
-        bpy.ops.object.camera_add()
-
-        # Get the newly created camera
-        camera = bpy.context.object
-
-        # Set the camera's name
-        camera.name = camera_data["name"]
-
-        # Set the camera's position
-        position = camera_data["properties"]["transform"]["position"]
-        camera.location.x = position[0]
-        camera.location.y = position[1]
-        camera.location.z = position[2]
-
-        # Set the camera's rotation
-        rotation = camera_data["properties"]["transform"]["rotation"]
-        # Convert the rotation from degrees to radians
-        rotation = [math.radians(r) for r in rotation]
-        camera.rotation_euler = Euler(rotation, "XYZ")
-
-        # Set the camera's lens properties
-        lens = camera_data["properties"]["lens"]
-        type_mapping = {
-            "PERSPECTIVE": "PERSP",
-            "ORTHOGRAPHIC": "ORTHO",
-            "PANORAMIC": "PANO",
-        }
-        camera.data.type = type_mapping.get(lens["type"].upper(), "PERSP")
-        camera.data.angle = math.radians(lens["fov"])
-        camera.data.clip_start = lens["near"]
-        camera.data.clip_end = lens["far"]
-
-        # Add the camera to the 05_Cameras collection
-        collection.objects.link(camera)
-        bpy.context.scene.collection.objects.unlink(camera)
-
-        # Set the camera as the active camera if "active" is true
-        if camera_data["properties"]["active"]:
-            bpy.context.scene.camera = camera
+
+    # Re-indented back inside create_cameras: at module level this loop ran on
+    # import and referenced `scene_data`/`collection`, which are undefined there.
+    for camera_data in scene_data["scene"]["cameras"]:
+        # Create a new camera object
+        bpy.ops.object.camera_add()
+
+        # Get the newly created camera
+        camera = bpy.context.object
+
+        # Set the camera's name
+        camera.name = camera_data["name"]
+
+        # Set the camera's position (Y-up source -> Blender Z-up: y = -z, z = y)
+        position = camera_data["properties"]["transform"]["position"]
+        camera.location.x = position[0]
+        camera.location.y = -position[2]
+        camera.location.z = position[1]
+
+        # Set the camera's rotation (degrees -> radians; axes not remapped — confirm intended)
+        rotation = camera_data["properties"]["transform"]["rotation"]
+        local_euler = Euler(
+            (
+                math.radians(rotation[0]),
+                math.radians(rotation[1]),
+                math.radians(rotation[2]),
+            ),
+            "XYZ",
+        )
+
+        # Apply the local rotation to the camera
+        camera.rotation_euler = local_euler
+
+        # Update the camera's matrix_world to apply the local transformation
+        camera.matrix_world = camera.matrix_basis
+
+        # Calculate the global rotation
+        global_rotation = camera.matrix_world.to_euler()
+
+        # Set the camera's rotation to the global rotation
+        camera.rotation_euler = global_rotation
+
+        # Set the camera's lens properties
+        lens = camera_data["properties"]["lens"]
+        type_mapping = {
+            "PERSPECTIVE": "PERSP",
+            "ORTHOGRAPHIC": "ORTHO",
+            "PANORAMIC": "PANO",
+        }
+        camera.data.type = type_mapping.get(lens["type"].upper(), "PERSP")
+        camera.data.angle = math.radians(lens["fov"])
+        camera.data.clip_start = lens["near"]
+        camera.data.clip_end = lens["far"]
+
+        # Add the camera to the 05_Cameras collection
+        collection.objects.link(camera)
+        bpy.context.scene.collection.objects.unlink(camera)
+
+        # Set the camera as the active camera if "active" is true
+        if camera_data["properties"]["active"]:
+            bpy.context.scene.camera = camera
 
 
 def set_output_paths(base_path, project_name):
+
+    # check if folder exist, if not create it
+    folder_path = base_path + "//" + project_name
+    if not os.path.exists(folder_path):
+        os.makedirs(folder_path)
     # Get the current scene
     scene = bpy.context.scene
 
@@ -388,7 +501,7 @@ def set_output_paths(base_path, project_name):
             # Check if the node is an output node
             if node.type == "OUTPUT_FILE":
                 # Set the base path of the output node
-                node.base_path = base_path + "//" + project_name
+                node.base_path = folder_path
                 # Iterate over all file slots of the output node
                 # for file_slot in node.file_slots:
                 #     # Set the path of the file slot
@@ -811,9 +924,16 @@ class ZSSD_PT_Main(ZSSDPanel, bpy.types.Panel):
 
         col.label(text="Stable Diffusion Connection")
 
+        col.prop(context.scene, "load_local_DB")
+
+        col.prop(context.scene, "config_string")
+
         # load scene button
+
         col.operator("zs_sd_loader.load_scene", text="Load Scene")
 
+        col.separator()
+
         # export assets button
         col.operator("zs_canvas.export_assets", text="Export Assets")
 
@@ -832,6 +952,18 @@ def register():
     for blender_class in blender_classes:
         bpy.utils.register_class(blender_class)
 
+    bpy.types.Scene.shot_info_ai = bpy.props.StringProperty(
+        name="Shot Info",
+    )
+
+    bpy.types.Scene.config_string = bpy.props.StringProperty(  # type: ignore
+        name="Configuration String",
+    )
+
+    bpy.types.Scene.load_local_DB = bpy.props.BoolProperty(  # type: ignore
+        name="Load Local DB",
+    )
+
     # Has to be afqter class registering to correctly register property
 
     # register global properties
@@ -852,6 +984,9 @@ def unregister():
     for blender_class in blender_classes:
         bpy.utils.unregister_class(blender_class)
     # unregister global properties
+    del bpy.types.Scene.shot_info_ai
+    del bpy.types.Scene.config_string
+    del bpy.types.Scene.load_local_DB
 
     # unregister list items
     # del bpy.types.Scene.my_list

二进制
03_blender/sd_blender/__pycache__/__init__.cpython-310.pyc


二进制
03_blender/sd_blender/__pycache__/__init__.cpython-311.pyc


二进制
03_blender/sd_blender/sample_scene/Canvas_Render_Scene.blend


+ 5 - 5
03_blender/sd_blender/sample_scene/assets_database.json

@@ -1,18 +1,18 @@
 [
   {
-    "id": "f8761a95-c28a-47ec-9254-e76cc3b693c7",
+    "id": "LNG Sleeping Mask",
     "name": "LNG_AntiAging_SleepingMask_MY2023",
     "web_path": "https://www.laneige.com/LNG_AntiAging_SleepingMask_MY2023.glb",
     "type": "product"
   },
   {
-    "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89f8",
+    "id": "LNG PerfectRenew",
     "name": "LNG_PerfectRenew_Serum_MY2023",
     "web_path": "https://www.laneige.com/LNG_PerfectRenew_Serum_MY2023.glb",
     "type": "product"
   },
   {
-    "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89g9",
+    "id": "Circular Cover",
     "name": "SWS_TablesAndDisplay_CircularCover_0001",
     "web_path": "https://www.laneige.com/SWS_TablesAndDisplay_CircularCover_0001.glb",
     "type": "asset"
@@ -24,9 +24,9 @@
     "type": "shape"
   },
   {
-    "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89h3",
+    "id": "Sphere",
     "name": "BasicShapes_Sphere_0001",
     "web_path": "https://www.laneige.com/BasicShapes_Cube_0001.glb",
     "type": "shape"
   }
-]
+]

+ 23 - 90
03_blender/sd_blender/sample_scene/scene_info.json

@@ -2,10 +2,10 @@
   "scene": {
     "objects": [
       {
-        "name": "LNG Serum",
+        "name": "LNG Sleeping Mask",
         "type": "group",
         "group_type": "product",
-        "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89f8",
+        "id": "LNG Sleeping Mask 4",
         "properties": {
           "transform": {
             "position": [
@@ -13,129 +13,62 @@
               0,
               0
             ],
-            "rotation": [
-              -8,
-              -6.9,
-              -42
-            ],
-            "scale": [
-              1,
-              1,
-              1
-            ]
-          },
-          "visible": true
-        }
-      },
-      {
-        "name": "Sphere",
-        "type": "group",
-        "group_type": "shape",
-        "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89h3",
-        "properties": {
-          "transform": {
-            "position": [
-              -0.041122,
-              -0.036135,
-              0.155559
-            ],
             "rotation": [
               0,
               0,
               0
             ],
             "scale": [
-              0.74098,
-              0.74098,
-              0.74098
+              10,
+              10,
+              10
             ]
           },
-          "visible": true,
-          "color": {
-            "r": 0.5,
-            "g": 0.5,
-            "b": 0.5
-          }
+          "visible": true
         }
       },
       {
-        "name": "Sphere",
+        "name": "LNG Sleeping Mask",
         "type": "group",
-        "group_type": "shape",
-        "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89h3",
+        "group_type": "product",
+        "id": "LNG Sleeping Mask 4",
         "properties": {
           "transform": {
             "position": [
-              0.067047,
-              0.088912,
-              -0.023188
-            ],
-            "rotation": [
               0,
               0,
-              0
-            ],
-            "scale": [
-              0.4103,
-              0.4103,
-              0.4103
-            ]
-          },
-          "visible": true,
-          "color": {
-            "r": 0.5,
-            "g": 0.5,
-            "b": 0.5
-          }
-        }
-      },
-      {
-        "name": "Sphere",
-        "type": "group",
-        "group_type": "shape",
-        "id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89h3",
-        "properties": {
-          "transform": {
-            "position": [
-              -0.041122,
-              -0.132029,
-              0.078844
+              0.8122250653238932
             ],
             "rotation": [
-              0,
+              45.00000000000001,
               0,
               0
             ],
             "scale": [
-              0.16679,
-              0.16679,
-              0.16679
+              10,
+              10,
+              10
             ]
           },
-          "visible": true,
-          "color": {
-            "r": 0.5,
-            "g": 0.5,
-            "b": 0.5
-          }
+          "visible": true
         }
       }
     ],
     "cameras": [
       {
-        "name": "Camera",
+        "name": "Camera 1",
         "type": "camera",
         "properties": {
           "transform": {
             "position": [
-              0.432918,
-              -0.202823,
-              0.08365
+              3.077126336045053,
+              0.4348491214286826,
+              0.3981070047067921
             ],
             "rotation": [
-              88.8032,
-              0.786282,
-              66.6831
+              -179.99999999999997,
+              88.77522425185553,
+              180
             ]
           },
           "lens": {
@@ -169,5 +102,5 @@
     }
   },
   "user_id": "1125441",
-  "project_id": "15a314a1-8ba1-4e0e-ad0c-f605b06f89f8"
+  "project_id": "ebae7542-1794-4c07-8902-343db346dd39"
 }

+ 12 - 1
03_blender/sd_blender/zs_ai_render_script.py

@@ -4,6 +4,15 @@ import sys
 import json
 import base64
 
+
+def convert_base64_string_to_object(base64_string):
+    bytes = base64.b64decode(base64_string)
+    string = bytes.decode("ascii")
+
+    # return json.loads(string)
+    return string
+
+
 argv = sys.argv
 try:
 
@@ -16,7 +25,9 @@ try:
 
     scene_info_string = argv[0]
 
-    print("loading scene data", scene_info_string)
+    # print("loading scene data", scene_info_string)
+
+    bpy.context.scene.shot_info_ai = convert_base64_string_to_object(scene_info_string)
 
     bpy.ops.zs_sd_loader.load_scene()
 

+ 67 - 47
04_stable_diffusion/sd_comfy_api_v2.py

@@ -1,7 +1,7 @@
-#This is an example that uses the websockets api to know when a prompt execution is done
-#Once the prompt execution is done it downloads the images using the /history endpoint
+# This is an example that uses the websockets api to know when a prompt execution is done
+# Once the prompt execution is done it downloads the images using the /history endpoint
 
-import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
+import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
 import uuid
 import json
 import urllib.request
@@ -17,7 +17,10 @@ server_address = "127.0.0.1:8188"
 client_id = str(uuid.uuid4())
 api_path = "https://canvas-api-test.anvil.app/_/api"
 
-image_path = "D:/Temp/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/output/"
+image_path = (
+    "D:/Temp/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/output/"
+)
+
 
 def update_ai_image_task_status(row_id, new_status):
     # Define the URL for the API endpoint
@@ -39,6 +42,7 @@ def update_ai_image_task_status(row_id, new_status):
         print("Response:", response.text)
         return None
 
+
 def get_ai_image_task(row_id):
     # Define the URL for the API endpoint
     url = "{}/creation-module/ai-image/{}".format(api_path, row_id)
@@ -57,12 +61,14 @@ def get_ai_image_task(row_id):
         print("Response:", response.text)
         return None
 
+
 def find_image_and_convert_to_base64(image_path):
     with open(image_path, "rb") as image_file:
         image_data = image_file.read()
         image_base64 = base64.b64encode(image_data).decode("utf-8")
         return image_base64
 
+
 def upload_image_to_anvil(row_id, image_base64):
     url = "{}/creation-module/ai-image/upload-preview".format(api_path)
     payload = {"row_id": row_id, "image_base64": image_base64}
@@ -80,11 +86,14 @@ def upload_image_to_anvil(row_id, image_base64):
 
 
 def load_debug_ai_scene_info():
-    #open ai_scene_info.json
-    with open("D:/Git/ap-canvas-creation-module/04_stable_diffusion/ai_scene_info.json", "r") as f:
+    # open ai_scene_info.json
+    with open(
+        "D:/Git/ap-canvas-creation-module/04_stable_diffusion/ai_scene_info.json", "r"
+    ) as f:
         ai_scene_info = json.load(f)
     return ai_scene_info
 
+
 def convert_base64_string_to_object(base64_string):
     bytes = base64.b64decode(base64_string)
     string = bytes.decode("ascii")
@@ -92,7 +101,6 @@ def convert_base64_string_to_object(base64_string):
     return json.loads(string)
 
 
-
 def set_filename(json_obj, title, new_prefix):
     for key, value in json_obj.items():
         if isinstance(value, dict):
@@ -106,6 +114,7 @@ def set_filename(json_obj, title, new_prefix):
                         return result
     return None
 
+
 def find_node(json_obj, title):
     for key, value in json_obj.items():
         if isinstance(value, dict):
@@ -118,34 +127,39 @@ def find_node(json_obj, title):
     return None
 
 
-
 def queue_prompt(prompt):
     p = {"prompt": prompt, "client_id": client_id}
-    data = json.dumps(p).encode('utf-8')
-    req =  urllib.request.Request("http://{}/prompt".format(server_address), data=data)
+    data = json.dumps(p).encode("utf-8")
+    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
     return json.loads(urllib.request.urlopen(req).read())
 
+
 def get_prompt(ai_scene_info):
     with open(
-    "D://Git//ap-canvas-creation-module//04_stable_diffusion//workflows//canvas_3d_to_img_standard_V1.json",
-    "r",
+        "D://Git//ap-canvas-creation-module//04_stable_diffusion//workflows//canvas_3d_to_img_standard_V1.json",
+        "r",
     ) as f:
         prompt_text_json = f.read()
 
     prompt = json.loads(prompt_text_json)
-    #set the text prompt for our positive CLIPTextEncode
+    # set the text prompt for our positive CLIPTextEncode
     positive_text = ai_scene_info["ai_scene"]["prompt"]["positive_prompt"]
     negative_text = ai_scene_info["ai_scene"]["prompt"]["negative_prompt"]
 
-    image_path = "D://Git//ap-canvas-creation-module//03_blender//sd_blender//sample_scene//Renders//15a314a1-8ba1-4e0e-ad0c-f605b06f89f8//"
+    base_path = "D://Git//ap-canvas-creation-module//03_blender//sd_blender//sample_scene//Renders//"
+
+    image_path = base_path + ai_scene_info["project_id"] + "/"
 
     image_base_path = image_path + "base0001.jpg"
     image_alpha_products_path = image_path + "alpha_products0001.jpg"
     # image_depth_path = image_path + "depth0001.png"
 
     prompt = json.loads(prompt_text_json)
-    file_name =  set_filename(prompt, "Save Image", "{project_id}/basic_api_example".format(project_id=ai_scene_info["project_id"]))
-    
+    file_name = set_filename(
+        prompt,
+        "Save Image",
+        "{project_id}/basic_api_example".format(project_id=ai_scene_info["project_id"]),
+    )
 
     ksampler_main = find_node(prompt, "KSampler")
     ksampler_main["inputs"]["noise_seed"] = random.randint(0, 1000000)
@@ -174,50 +188,59 @@ def get_prompt(ai_scene_info):
 
     return prompt
 
+
 def get_image(filename, subfolder, folder_type):
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
     url_values = urllib.parse.urlencode(data)
-    with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
+    with urllib.request.urlopen(
+        "http://{}/view?{}".format(server_address, url_values)
+    ) as response:
         return response.read()
 
+
 def get_history(prompt_id):
-    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
+    with urllib.request.urlopen(
+        "http://{}/history/{}".format(server_address, prompt_id)
+    ) as response:
         return json.loads(response.read())
 
+
 def get_images(ws, prompt):
-    prompt_id = queue_prompt(prompt)['prompt_id']
+    prompt_id = queue_prompt(prompt)["prompt_id"]
     output_images = {}
     while True:
         out = ws.recv()
         if isinstance(out, str):
             message = json.loads(out)
-            if message['type'] == 'executing':
-                data = message['data']
-                if data['node'] is None and data['prompt_id'] == prompt_id:
+            if message["type"] == "executing":
+                data = message["data"]
+                if data["node"] is None and data["prompt_id"] == prompt_id:
                     break  # Execution is done
         else:
             continue  # previews are binary data
 
     history = get_history(prompt_id)[prompt_id]
-    for node_id in history['outputs']:
-        node_output = history['outputs'][node_id]
+    for node_id in history["outputs"]:
+        node_output = history["outputs"][node_id]
         images_output = []
-        if 'images' in node_output:
-            for image in node_output['images']:
-                image_data = get_image(image['filename'], image['subfolder'], image['type'])
-                images_output.append({
-                    'filename': image['filename'],
-                    'data': image_data,
-                    'type': image['type']
-                })
+        if "images" in node_output:
+            for image in node_output["images"]:
+                image_data = get_image(
+                    image["filename"], image["subfolder"], image["type"]
+                )
+                images_output.append(
+                    {
+                        "filename": image["filename"],
+                        "data": image_data,
+                        "type": image["type"],
+                    }
+                )
         output_images[node_id] = images_output
 
     return output_images
 
-def main(*args):
 
-    
-    
+def main(*args):
 
     argv = sys.argv
 
@@ -234,38 +257,35 @@ def main(*args):
     except Exception as e:
         print("Error:", e)
 
-    
-
     # ai_scene_info = load_debug_ai_scene_info()
 
-    prompt = get_prompt(ai_scene_info)    
+    prompt = get_prompt(ai_scene_info)
 
     ws = websocket.WebSocket()
     ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
 
     update_ai_image_task_status(row_id, 2)
 
-
-    images = get_images(ws, prompt)        
+    images = get_images(ws, prompt)
 
     for node_id in images:
         for image_info in images[node_id]:
-            if image_info['type'] == 'output':
+            if image_info["type"] == "output":
                 response = get_ai_image_task(row_id)
                 data = json.loads(response["data"])
 
                 project_id = data["project_id"]
 
-                complete_image_path = image_path + "{}/{}".format(project_id,image_info['filename'])
+                complete_image_path = image_path + "{}/{}".format(
+                    project_id, image_info["filename"]
+                )
                 print(complete_image_path)
 
                 image_base64 = find_image_and_convert_to_base64(
-                    image_path + "{}/{}".format(project_id,image_info['filename'])
-    )
+                    image_path + "{}/{}".format(project_id, image_info["filename"])
+                )
                 upload_image_to_anvil(row_id, image_base64)
-                
+
 
 if __name__ == "__main__":
     main(sys.argv)
-
-