@@ -60,8 +60,8 @@ def get_prompt(ai_scene_info):
     prompt = json.loads(prompt_text_json)
     #set the text prompt for our positive CLIPTextEncode
-    positive_text = ai_scene_info["ai_scene"]["settings"]["positive_prompt"]
-    negative_text = ai_scene_info["ai_scene"]["settings"]["negative_prompt"]
+    positive_text = ai_scene_info["ai_scene"]["prompt"]["positive_prompt"]
+    negative_text = ai_scene_info["ai_scene"]["prompt"]["negative_prompt"]
 
     image_path = "D://Git//ap-canvas-creation-module//03_blender//sd_blender//sample_scene//Renders//15a314a1-8ba1-4e0e-ad0c-f605b06f89f8//"
 
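The payload itself is not part of this diff, so its exact schema is an assumption. Read together with the settings lookups in the next hunk, the key paths above imply an ai_scene_info shaped roughly like this (all values are made-up placeholders):

# Illustrative only: the structure implied by the key lookups in this diff.
# Values are placeholders, not taken from the project.
ai_scene_info = {
    "ai_scene": {
        "prompt": {
            "positive_prompt": "a photorealistic render of the scene",
            "negative_prompt": "blurry, low quality",
        },
        "settings": {
            "steps": 30,
            "cfg": 7.0,
        },
    },
}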
@@ -76,7 +76,8 @@ def get_prompt(ai_scene_info):
     ksampler_main["inputs"]["noise_seed"] = random.randint(0, 1000000)
 
     ksampler_main = find_node(prompt, "KSampler")
-    ksampler_main["inputs"]["steps"] = 30
+    ksampler_main["inputs"]["steps"] = ai_scene_info["ai_scene"]["settings"]["steps"]
+    ksampler_main["inputs"]["cfg"] = ai_scene_info["ai_scene"]["settings"]["cfg"]
 
     prompt_positive = find_node(prompt, "positive_CLIPTextEncodeSDXL")
     prompt_positive["inputs"]["text_g"] = positive_text
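find_node is defined elsewhere in the script and is not shown in this diff. As a point of reference only, a helper with this call signature for a ComfyUI workflow exported in API format (node id mapping to class_type, inputs and an optional _meta title) could look like the sketch below; matching on the node title with a class_type fallback is an assumption, not necessarily what the project does:

def find_node(prompt, name):
    # Hypothetical helper: walk the workflow dict (node_id -> node) and return
    # the first node whose _meta title or class_type matches `name`.
    for node in prompt.values():
        title = node.get("_meta", {}).get("title")
        if title == name or node.get("class_type") == name:
            return node
    raise KeyError(f"No node named {name!r} in workflow")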
@@ -149,15 +150,13 @@ def main():
     except Exception as e:
         print("Error:", e)
 
-    prompt = get_prompt(
-
-    )
+    prompt = get_prompt(ai_scene_info)
 
     ws = websocket.WebSocket()
     ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
     images = get_images(ws, prompt)
 
-    print("Workflow completed, images are stored in the images variable")
+
 
     #Commented out code to display the output images:
 
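get_images is likewise outside this diff. In ComfyUI's stock websocket example the edited workflow is queued over HTTP and the websocket is only used to follow execution progress; assuming this script follows that pattern, queueing the prompt boils down to something like the following (server_address and client_id are the module-level values used above):

import json
import urllib.request

def queue_prompt(prompt, server_address, client_id):
    # Assumes ComfyUI's standard HTTP API: POST the workflow to /prompt and
    # receive a prompt_id that later appears in the websocket messages.
    payload = json.dumps({"prompt": prompt, "client_id": client_id}).encode("utf-8")
    request = urllib.request.Request(f"http://{server_address}/prompt", data=payload)
    return json.loads(urllib.request.urlopen(request).read())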